# -*- coding: utf-8 -*-
"""
MotionDetect v1.0
Transform video to activity signal per well

Copyright (C) 2019 Fernan R Perez Galvez

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.

Created on Thu Dec 26 2019

NOTE: before running this script, open it in an Integrated Development
Environment of your preference (tested here on Spyder with Python 3.7).
NOTE: make sure the required libraries are installed before running the script.

MotionDetect version 1.0
PART 3/3

Description: Transform video to activity signal per well

INPUT:
    - printed list of regions of interest from the console (previous script)

OUTPUT:
    - figure of activity traces per well
    - dataframe of raw activity data per well
    - last-frame estimation of time to knockdown, corrected for frame drop

Step 1. Indicate the video file name
Step 2. Paste the list 'pozos' from the last script's output
Step 3. Define file names for
    3.a - activity figure
    3.b - raw dataset
    3.c - time-to-knockdown estimation (last active frame, frame-drop corrected)
RUN SCRIPT
"""

import cv2
import numpy as np
import pandas as pd
from time import process_time
from scipy import stats
from matplotlib import pyplot as plt

# remember to install MoviePy by typing in the Spyder prompt
#   conda install -c conda-forge moviepy
#   pip install pyjoyplot
# the frame-drop correction at the end also requires ffmpeg-python:
#   pip install ffmpeg-python

cap = cv2.VideoCapture('yourVideoName.mp4')   # HERE Step 1

salida = [[]]   # one activity row per frame pair; the first (empty) row becomes NaN
l2 = []
i = 0
ret, frame1 = cap.read()
ret, frame2 = cap.read()

# well names for a 7 x 12 plate layout (rows a-g, columns 1-12)
nombres = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12',
           'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10', 'b11', 'b12',
           'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'c10', 'c11', 'c12',
           'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10', 'd11', 'd12',
           'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'e10', 'e11', 'e12',
           'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12',
           'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9', 'g10', 'g11', 'g12']

# Well positions in video 15-11-2019 line 254   HERE STEP 2
# each entry is [(x1, y1), (x2, y2)]: top-left and bottom-right corners of a well ROI
pozos = [
    [(575, 293), (623, 341)], [(526, 293), (575, 342)], [(477, 294), (525, 342)],
    [(427, 292), (476, 342)], [(379, 293), (427, 341)], [(330, 292), (379, 340)],
    [(281, 290), (330, 339)], [(231, 291), (281, 338)], [(183, 293), (233, 340)],
    [(135, 293), (184, 342)], [(88, 292), (136, 342)], [(37, 291), (88, 342)],
    [(576, 246), (625, 291)], [(526, 245), (577, 294)], [(478, 242), (526, 293)],
    [(428, 243), (479, 293)], [(377, 242), (427, 292)], [(329, 242), (378, 291)],
    [(281, 243), (332, 291)], [(233, 244), (282, 290)], [(184, 242), (233, 292)],
    [(135, 243), (184, 293)], [(87, 241), (137, 294)], [(39, 244), (89, 293)],
    [(575, 194), (625, 246)], [(526, 195), (575, 245)], [(478, 195), (526, 243)],
    [(428, 194), (479, 244)], [(378, 194), (429, 244)], [(330, 194), (377, 243)],
    [(283, 194), (332, 242)], [(233, 193), (283, 244)], [(184, 194), (234, 243)],
    [(134, 194), (185, 244)], [(85, 193), (135, 243)], [(36, 194), (87, 242)],
    [(576, 145), (627, 194)], [(527, 146), (577, 196)], [(477, 146), (529, 197)],
    [(428, 146), (478, 195)], [(379, 146), (427, 195)], [(330, 147), (378, 194)],
    [(283, 146), (332, 193)], [(232, 147), (284, 193)], [(184, 144), (232, 192)],
    [(134, 146), (184, 193)], [(86, 145), (133, 192)], [(36, 144), (86, 193)],
    [(578, 97), (628, 144)], [(527, 96), (579, 145)], [(477, 96), (528, 146)],
    [(427, 97), (479, 146)], [(377, 96), (428, 146)], [(330, 96), (379, 147)],
    [(280, 96), (331, 146)], [(234, 97), (281, 146)], [(184, 96), (234, 145)],
    [(134, 97), (186, 146)], [(85, 97), (135, 146)], [(38, 97), (86, 145)],
    [(578, 47), (629, 97)], [(529, 47), (578, 97)], [(479, 47), (529, 96)],
    [(429, 47), (478, 96)], [(379, 46), (430, 96)], [(329, 49), (379, 96)],
    [(281, 48), (330, 95)], [(231, 48), (281, 95)], [(183, 46), (232, 96)],
    [(130, 48), (184, 96)], [(83, 46), (131, 96)], [(38, 48), (84, 97)],
    [(583, 2), (630, 47)], [(529, 2), (580, 48)], [(480, 3), (530, 49)],
    [(431, 3), (481, 49)], [(381, 2), (431, 46)], [(331, 2), (382, 49)],
    [(284, 1), (332, 49)], [(233, 1), (284, 49)], [(182, 2), (234, 47)],
    [(133, 1), (183, 47)], [(82, 0), (133, 46)], [(35, 2), (83, 49)]]

tamano = pozos

# start time flag
t1_start = process_time()

while cap.isOpened():
    if ret:
        # frame differencing: keep only pixels that changed between consecutive frames
        diff = cv2.absdiff(frame1, frame2)
        gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (3, 3), 0)
        _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
        dilated = cv2.dilate(thresh, None, iterations=3)
        frame1 = frame2
        ret, frame2 = cap.read()
        temp2 = []
        clone = dilated.copy()
        for j in range(len(pozos)):
            # crop the thresholded difference image to each well ROI and average it,
            # giving one activity value per well per frame pair
            temp = clone[pozos[j][0][1]:pozos[j][1][1], pozos[j][0][0]:pozos[j][1][0]]
            actividad = np.sum(temp) / (temp.shape[0] * temp.shape[1])
            temp2.append(actividad)
        salida.append(temp2)
        i += 1
    else:
        print('End of video')
        break

print(i)   # number of frame pairs processed

#
# stop time
#
t1_stop = process_time()
print("Elapsed time:", t1_stop - t1_start)

cv2.destroyAllWindows()
cap.release()

#
# Plot of activity traces
#
df = pd.DataFrame(salida, columns=nombres)
# hist = df.hist(bins=3)
# print(hist)
df['seconds'] = df.index / 30   # assumes a 30 fps recording
colnames = list(df.columns)
barras = df.plot(x='seconds', y=colnames[:-1], figsize=(10, 50), subplots=True)
plt.savefig('activity-yourVideoName.pdf')   # HERE Step 3.a

#
# Time to knockdown analysis
#
sec = []
for column in df:
    s = df[column].tolist()
    ls = [k for k, e in enumerate(s) if e > 0]
    if len(ls) == 0:
        sec.append(0)
    else:
        # last frame with any activity, converted to seconds at 30 fps
        sec.append(ls[len(ls) - 1] / 30)
# the last entry comes from the 'seconds' column and equals the nominal video duration
duracion = sec.pop()
print(sec, duracion)
print(stats.describe(sec))
df.to_csv('DataFrame-yourVideoName.csv')   # HERE Step 3.b

#
# frame drop CORRECTION
#
import ffmpeg

probe = ffmpeg.probe('yourVideoName.mp4')   # HERE Step 1 (too)
# real duration (in seconds) reported by ffprobe for the second stream of the file
totalSec = probe['streams'][1]['duration']
# rescale the frame-based estimates so the nominal duration matches the real one
CTmaxCorr = np.asarray(sec, dtype=np.float64) * float(totalSec) / duracion
resultados = {'Well': nombres, 'CTmax': sec, 'CTmaxCorr': CTmaxCorr}
dfctmax = pd.DataFrame(resultados)
dfctmax.to_csv("result-yourVideoName.csv")   # HERE Step 3.c
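
#
# Optional sanity check (not part of the original pipeline; a minimal sketch,
# assuming the files written in Steps 3.b and 3.c above exist). It reloads the
# result table and prints the spread of the corrected time to knockdown, plus
# the frame-drop scale factor that was applied (real duration from ffprobe
# divided by the nominal duration implied by counting frames at 30 fps).
# Uncomment to run after the main script has finished.
#
# check = pd.read_csv("result-yourVideoName.csv", index_col=0)
# print(check['CTmaxCorr'].describe())                       # per-well summary of corrected CTmax
# print("frame-drop scale factor:", float(totalSec) / duracion)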