#!/usr/bin/env python3

'''
    Quinema Rotoscopio
    expresiones faciales a OSC version 0.1 (alfa)
    Copyright (C) 2021  Ernesto Bazzano (bazza)

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

'''

try:
	import cv2
	# GUI ↓
	from tkinter import filedialog
	from tkinter import *
	import pickle
	# GUI ↑
	from imutils import face_utils
	import dlib
	# Probe that this dlib build exposes the CUDA flag (actually set later).
	dlib.DLIB_USE_CUDA
	import sys
	import numpy as np
	from oscpy.client import OSCClient
	import locale
	loc = locale.getlocale()
	# Pick UI strings by system language.  getdefaultlocale() can return
	# (None, None); guard with `or ""` so slicing never raises TypeError.
	if ((locale.getdefaultlocale()[0] or "")[:2] == "es"):
		txt_save = "Guardar"
		txt_help0 = "Intente mantener la cara dentro del circulo para guardar la expresiones,"
		txt_help1 = "presione las teclas del 0 al 9 salvar expresiones, s: guardar archivo, o: lee archivo, q: salir"
	else:
		txt_save = "Save"
		txt_help0 = "Try to keep your face inside the circle to save expressions"
		txt_help1 = "press 0 to 9 save expressions, s: save file, o: read file, q: exit"
	#import glob
	#import os
except Exception as exc:
	# Show the real failure before the install hints.  The hint now lists the
	# actual pip packages (oscpy was missing; sys/pickle/tkinter are stdlib).
	print(exc)
	print("pip install opencv-python dlib imutils numpy oscpy")
	print("download and expand http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
	quit()
# OSC destination that receives the tracking data each frame.
address = "127.0.0.1"
port = 9001
osc = OSCClient(address, port)

# Video source: webcam index (int) or a video file path (str).
video = 0
if len(sys.argv) > 1:
	if sys.argv[1] == "-h":
		print( sys.argv[0] + " (webcam:[0-9]|video) \n")
		exit()
	try:
		video = int(sys.argv[1])
	except ValueError:
		# Not a numeric webcam index: treat the argument as a file path.
		video = sys.argv[1]

try:
	# Enable CUDA acceleration when the installed dlib build supports it.
	dlib.DLIB_USE_CUDA = True
except Exception:
	pass

def findEuclideanDistance(source_representation, test_representation):
	"""Return the Euclidean (L2) distance between two landmark arrays."""
	difference = np.subtract(source_representation, test_representation)
	squared_sum = np.sum(np.square(difference))
	return np.sqrt(squared_sum)

# smoothing
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
	"""Smooth (and optionally differentiate) *y* with a Savitzky-Golay filter.

	y           : 1-D array-like signal.
	window_size : length of the fit window; bumped to the next odd value.
	order       : order of the polynomial fitted inside the window.
	deriv       : order of the derivative to return (0 = plain smoothing).
	rate        : sample-spacing factor applied to derivatives.
	Returns an ndarray with the same length as *y*.
	Raises TypeError when window_size is too small for *order* (kept as
	TypeError for backward compatibility with existing callers).
	"""
	from math import factorial
	window_size = abs(int(window_size))
	order = abs(int(order))
	# the filter needs an odd-sized window of at least 1
	if window_size % 2 != 1 or window_size < 1:
		window_size = window_size + 1
	if window_size < order + 2:
		raise TypeError("window_size is too small for the polynomials order")
	order_range = range(order+1)
	half_window = (window_size -1) // 2
	# precompute coefficients.  np.mat/.A was deprecated and removed in
	# NumPy 2.0; plain ndarrays with np.linalg.pinv give identical results.
	b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
	m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
	# pad the signal at the extremes with
	# values taken from the signal itself
	firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
	lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
	y = np.concatenate((firstvals, y, lastvals))
	return np.convolve( m[::-1], y, mode='valid')


detector = dlib.get_frontal_face_detector()
try:
	# 68-point landmark model; must sit in the current working directory.
	predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
except Exception as exc:
	# dlib raises RuntimeError when the model file is missing or unreadable;
	# show the real error before the download hint instead of hiding it.
	print(exc)
	print("download and expand http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
	quit()
# 3D model points.  Generic head-model coordinates that are matched each
# frame against the detected 2D landmarks to solve head pose (cv2.solvePnP).
model_points = np.array([
	(6.825897, 6.760612, 4.402142),  #33 left brow left corner
	(1.330353, 7.122144, 6.903745),  #29 left brow right corner
	(-1.330353, 7.122144, 6.903745), #34 right brow left corner
	(-6.825897, 6.760612, 4.402142), #38 right brow right corner
	(5.311432, 5.485328, 3.987654),  #13 left eye left corner
	(1.789930, 5.393625, 4.413414),  #17 left eye right corner
	(-1.789930, 5.393625, 4.413414), #25 right eye left corner
	(-5.311432, 5.485328, 3.987654), #21 right eye right corner
	(2.005628, 1.409845, 6.165652),  #55 nose left corner
	(-2.005628, 1.409845, 6.165652) #49 nose right corner
])
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
# Open the requested webcam/video; if that fails, fall back to camera 1.
cap = cv2.VideoCapture(video)
if not (cap.isOpened()):
	cap = cv2.VideoCapture(1)
# Requested capture height/width in pixels.
CH=480
CW=640
cap.set(cv2.CAP_PROP_FRAME_WIDTH, CW)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CH)
# Savitzky-Golay parameters: sliding-window length and polynomial order.
suavizado=20
puerta=10

# Rolling rotation samples (x, y, z) fed to the smoother each frame.
rx = np.array([])
ry = np.array([])
rz = np.array([])

# Rolling translation (position) samples (x, y, z).
px = np.array([])
py = np.array([])
pz = np.array([])

g = -1                # expression slot (0-9) pending to record; -1 = none
expresion = [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]   # saved landmark templates; int -1 = empty slot
distancia = 0
cara = 0              # landmark magnitude of the current face chip
cara1 = 0             # same metric for the previous frame (stability check)
expresionIB = 0       # last expression index sent over OSC
expresionI  = 0       # best-matching expression index this frame
salvar = False        # show the "saved" message next frame
ayuda = False         # show the help overlay
entrenar = True       # draw the training ellipse guide
fotograma = 0         # frame counter, sent on /quinema/f
filename="expresion.txt"
# Main loop: grab a frame, estimate and smooth head pose, send OSC, match
# the facial expression against the saved templates, and handle keyboard input.
while True:
	mostrar_texto = False
	# NOTE(review): cap.read() can return (False, None) at end of a video
	# file; cvtColor would then raise — confirm whether that is acceptable.
	_, image = cap.read()
	im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	 
	dets = detector(im, 1)
	# Proceed only when exactly one face was detected.
	if (not(len(dets) == 0 or len(dets) > 1)) :
		#--------------------POSITION-------------------------------------

		shape = face_utils.shape_to_np(predictor(im, dets[0]))
		#2D image points. If you change the image, you need to change vector
		size = im.shape
		# Camera internals
		focal_length = size[1]
		center = (size[1]/2, size[0]/2)
		camera_matrix = np.array(
			[[focal_length, 0, center[0]],
			[0, focal_length, center[1]],
			[0, 0, 1]], dtype = "single"
		)
		# Detected 2D landmarks paired index-by-index with model_points above.
		image_points = np.array([
			(shape[17][0], shape[17][1]), #17 left brow left corner
			(shape[21][0], shape[21][1]), #21 left brow right corner
			(shape[22][0], shape[22][1]), #22 right brow left corner
			(shape[26][0], shape[26][1]), #26 right brow right corner
			(shape[36][0], shape[36][1]), #36 left eye left corner
			(shape[39][0], shape[39][1]), #39 left eye right corner
			(shape[42][0], shape[42][1]), #42 right eye left corner
			(shape[45][0], shape[45][1]), #45 right eye right corner
			(shape[31][0], shape[31][1]), #31 nose left corner
			(shape[35][0], shape[35][1]), #35 nose right corner

		], dtype="single")
		(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
		

		#--------------------SAVITZKY GOLAY--------------------------------
		# ROTATION XYZ — on the first frame, pre-fill the history with the
		# current sample so the smoother has a full window to work on.
		if (rx.size < 1):
			for c in range(suavizado):
				rx = np.append(rx, rotation_vector[0])
				ry = np.append(ry, rotation_vector[2])
				rz = np.append(rz, rotation_vector[1]*-1)

		# Slide the window: keep the last `suavizado` samples plus the new one.
		# Note the axis remapping: OSC x<-rvec[0], y<-rvec[2], z<- -rvec[1].
		rx = np.append(rx[-suavizado:], rotation_vector[0])
		ry = np.append(ry[-suavizado:], rotation_vector[2])
		rz = np.append(rz[-suavizado:], rotation_vector[1]*-1)

		rx = savitzky_golay(rx, suavizado, puerta)
		ry = savitzky_golay(ry, suavizado, puerta)
		rz = savitzky_golay(rz, suavizado, puerta)		

		# Rescale/flip translation into the receiver's coordinate range.
		translation_vector[0] = translation_vector[0]/-10
		translation_vector[1] = translation_vector[1]/50
		translation_vector[2] = translation_vector[2]/-10
		#------------------------------------------------------------------
		# POSITION XYZ — same pre-fill / sliding window / smoothing scheme.
		if (px.size < 1):
			for c in range(suavizado):
				px = np.append(px, translation_vector[0])
				py = np.append(py, translation_vector[2])
				pz = np.append(pz, translation_vector[1])

		px = np.append(px[-suavizado:], translation_vector[0])
		py = np.append(py[-suavizado:], translation_vector[2])
		pz = np.append(pz[-suavizado:], translation_vector[1])
	
		px = savitzky_golay(px, suavizado, puerta)
		py = savitzky_golay(py, suavizado, puerta)
		pz = savitzky_golay(pz, suavizado, puerta)
		#------------------------------------------------------------------		
		# TODO: only forward new positions when they differ enough from the
		# previous ones (original note, untranslated intent preserved).
		osc.send_message(b"/quinema/p",[px[-2], py[-2], pz[-2]])
		osc.send_message(b"/quinema/r",[rx[-2], ry[-2], rz[-2]])
		# Send the expression index only when it changed since last send.
		if (expresionIB != expresionI):
			osc.send_message(b"/quinema/e",[expresionI])
			expresionIB = expresionI
		# Slots hold int -1 until recorded, ndarrays afterwards.
		if  type(expresion[expresionI]) != int:
			cv2.putText(image, "Expresion: " + str(expresionIB),(0, image.shape[0]- 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (128,128,128), 1, cv2.LINE_AA)					
		#--------------------EXPRESSION-------------------------------------
		expresionL = 5000.0
		d = 0.0
		graba = 0
		nueva_expresion = 0
		# Classify only when the head is roughly frontal (small x/z rotation).
		if rx[-1] > -.25 and rx[-1] < .25 and rz[-1] > -.25 and rz[-1] < .25:		
			# Align/crop the face to a normalized chip before comparing.
			faces = dlib.full_object_detections()
			faces.append(predictor(image, dets[0]))
			image1 = dlib.get_face_chip(image, faces[0], size=CH, padding=0.7)
			# NOTE(review): `dets` is reassigned here to detections on the
			# chip; the later landmark drawing still uses `shape` from the
			# original frame, so this only affects the branch below.
			dets = detector(image1, 1)
			if (not(len(dets) == 0 or len(dets) > 1)) :
				shape_detalle = face_utils.shape_to_np(predictor(image1, dets[0]))
				cara1 = cara
				cara = (np.sum(np.multiply(shape_detalle, shape_detalle)))
				# classify only when the face is similar to the previous frame
				if abs(cara - cara1) < 40000:
				#if True:
					nueva_expresion = 1
					#cv2.imshow("Output1", image1)
					# Nearest-neighbor match over the recorded templates.
					for i in range(len(expresion)):
						if  type(expresion[i]) == int:
							continue
						d = findEuclideanDistance(shape_detalle, expresion[i])
						if d < expresionL:
							expresionL = d
							expresionIB = expresionI
							expresionI  = i
				# A digit key was pressed earlier: record into slot g.
				if g > -1:
					expresion[g] = shape_detalle
					graba = 1
					ayuda = False
					cv2.putText(image, "Expresion: " + str(g),(0, image.shape[0]- 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 1, cv2.LINE_AA)
					#print("guarda la expresion ", g, 0, file=sys.stderr)
					mostrar_texto = True								
					#	break
					g = -1
		#print ( "oscsend localhost 9001 /quinema/r fff " + str("%01.3f" % x[-1]) +"\t"+ str("%01.3f" % y[-5]) +"\t"+ str("%01.3f" % z[-5]))
		#print ( "oscsend localhost 9001 /quinema/p fff " + str("%01.3f" % px[-1]) +"\t"+ str("%01.3f" % py[-1]) +"\t"+ str("%01.3f" % pz[-1]))	
		#print ( "oscsend localhost 9001 /quinema/e f " + str("%01.3f" % di))
		#cv2.imshow("warp", out)
		#sys.stdout.flush()
		# One-frame "saved" confirmation message.
		if salvar:
			ayuda = False
			cv2.putText(image, txt_save,(0, image.shape[0]- 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 1, cv2.LINE_AA)
			salvar = False
			mostrar_texto = True
		if ayuda:
			cv2.putText(image, txt_help0 ,(3, image.shape[0]- 25), cv2.FONT_HERSHEY_SIMPLEX, 0.42, (0,255,0), 1, cv2.LINE_AA)
			cv2.putText(image, txt_help1 ,(3, image.shape[0]- 10), cv2.FONT_HERSHEY_SIMPLEX, 0.42, (0,255,0), 1, cv2.LINE_AA)
			salvar = False
			entrenar = True
			mostrar_texto = True
		# Training guide ellipse: green/thick while recording a new expression.
		if entrenar:
			cv2.ellipse(image, (int(CW/2),int(CH/2)), (int(CH/2.5/1.5), int(CH/2.5)), 0, 0, 360, ((0,255,0),(128,128,128))[graba == 0], (4,2)[nueva_expresion == 0])
		# Draw the 68 detected landmarks.
		for (x, y) in shape:
			cv2.circle(image, (x, y), 3, (0, 0, 255), -1)
	cv2.imshow("Output", image)
	osc.send_message(b"/quinema/f", [fotograma])
	fotograma = fotograma + 1
	# Hold the frame longer (400 ms) when a status message is on screen.
	k = cv2.waitKey((1,400)[mostrar_texto]) & 0xFF
	# Window was closed by the user.
	if cv2.getWindowProperty("Output", cv2.WND_PROP_ASPECT_RATIO) < 0:
	        break
	if k == ord('q'):
		break	
	# Digit keys 0-9 arm recording into that expression slot.
	for x in range(0, 10):
		if k == ord(str(x)):
			g = x
	# 's': save the recorded expressions with pickle via a file dialog.
	if k == ord('s'):
		tk = Tk()
		tk.tk.call('tk', 'scaling', 1.0)
		filename =  filedialog.asksaveasfilename(initialfile=filename, filetypes = (("expresiones","*.txt"),("all files","*.*")))
		try:
			with open(filename, 'wb') as file:
				#json.dump( expresion, file)
				pickle.dump(expresion, file)
		except:
			pass
		salvar = True
		entrenar = False
		print(filename)
		tk.destroy()
	# 'o': load expressions from a previously saved file.
	if k == ord('o'):
		tk = Tk()
		tk.tk.call('tk', 'scaling', 1.0)
		filename =  filedialog.askopenfilename  (initialfile=filename, filetypes = (("expresiones","*.txt"),("all files","*.*")))
		try:
			with open(filename, 'rb') as file:
 				#expresion = json.load(file)
				# NOTE(review): pickle.load can execute arbitrary code from
				# the file — only open trusted expression files.
				expresion = pickle.load(file)
		except:
			pass
		print(filename)
		tk.destroy()
		entrenar = False
	# 'h': toggle the help overlay on.
	if k == ord('h'):
		ayuda = True

cv2.destroyAllWindows()