Commit bd42f19f authored by Francisco Romero

restructure and simplify scripts and create module

parent 2871a1c8
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Sphinx documentation
docs/_build
@@ -4,7 +4,6 @@ This repository includes scripts for the extraction and analysis of the midline
## TODO
* Refactor midline.py
* Eigenvalues
* Eigenshapes
* Create modules
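## Usage

A minimal sketch of how the `fishmidline` package is driven by the scripts in this repository (the session path below is a placeholder; the folder layout is the one produced by idtracker.ai, with `video_object.npy` and `preprocessing/blobs_collection.npy`):

```python
import os
import numpy as np
from fishmidline import get_midline_angles

session_path = 'path/to/session'  # placeholder: idtracker.ai session folder
video = np.load(os.path.join(session_path, 'video_object.npy'),
                allow_pickle=True, encoding='latin1').item()
list_of_blobs = np.load(os.path.join(session_path, 'preprocessing',
                                     'blobs_collection.npy'),
                        allow_pickle=True, encoding='latin1').item()

# Midline angles for every individual blob in the first frame
for blob in list_of_blobs.blobs_in_video[0]:
    if blob.is_an_individual:
        angles = get_midline_angles(video, blob, distance_between_points=3)
        print(blob.identity, angles)
```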
from .fish_contour import *
from .fishmidline import *
import numpy as np
import scipy.ndimage
import cv2
from scipy.signal import argrelmax
SMOOTH_SIGMA = 10
HEAD_DIAMETER = 20
def find_max(curv, n=2):
    # Indices of the local curvature maxima along the closed contour
    max_i = argrelmax(curv, mode='wrap')[0]
    # Candidate indices sorted by increasing curvature value
    b_s = sorted(max_i.tolist(), key=lambda x: curv[x])
    try:
        nose = b_s[-n]
        tail = b_s[-1]
        return nose, tail
    except IndexError:
        print("Warning: no nose detected in a frame. "
              "The absolute maximum is returned instead.")
        return b_s[-1], b_s[-1]
class FishContour():
def __init__(self, curve):
self.c = np.squeeze(curve.astype('float32'))
@classmethod
def fromcv2contour(cls, cv2contour):
return cls(cv2contour[:, 0, :].astype(np.float32))
def __str__(self):
return self.c.__str__()
    def derivative(self):
        # Central-difference first derivative along the closed contour
        return scipy.ndimage.convolve1d(self.c,
                                        [-0.5, 0.0, 0.5], mode='wrap', axis=0)
    def second_derivative(self):
        # Second derivative along the closed contour
        return scipy.ndimage.convolve1d(self.c,
                                        [1.0, -2.0, 1.0], mode='wrap', axis=0)
    def curvature(self):
        # Signed curvature of a parametric curve:
        # (x' y'' - y' x'') / (x'^2 + y'^2)^(3/2)
        [x_1, y_1] = self.derivative().transpose()
        [x_2, y_2] = self.second_derivative().transpose()
        return (x_1 * y_2 - y_1 * x_2) / np.power(x_1 * x_1 + y_1 * y_1, 3 / 2)
def find_nose_tail(self, smooth_sigma):
smoother = smooth(self, smooth_sigma)
nose_i, tail_i = find_max(abs(smoother.curvature()), n=2)
return self.c[nose_i, :], self.c[tail_i, :]
def find_nose_and_orientation(self,
head_size=HEAD_DIAMETER,
smooth_sigma=SMOOTH_SIGMA):
        nose, tail = self.find_nose_tail(smooth_sigma)
        # Contour points within head_size of the nose define the head; the
        # vector from the head centroid to the nose gives the orientation
        distance = np.power(self.c[:, 0] - nose[0], 2) + \
            np.power(self.c[:, 1] - nose[1], 2)
        head_centroid = \
            FishContour(self.c[np.where(distance < head_size * head_size)]
                        ).centroid()
        orvec = nose - head_centroid
        angle = np.degrees(np.arctan2(orvec[1], orvec[0]))
        return nose, tail, angle + 90, np.array(head_centroid)
    def ascvcontour(self):
        # Reshape to the (N, 1, 2) layout expected by OpenCV contour routines
        return np.expand_dims(self.c, axis=1)
    def centroid(self):
        M = cv2.moments(self.ascvcontour())
        try:
            cX = M["m10"] / M["m00"]
            cY = M["m01"] / M["m00"]
        except ZeroDivisionError:
            cX = 0
            cY = 0
            print('Warning: contour has zero area (M["m00"] is zero)')
        return (cX, cY)
def smooth(contour, smooth_sigma):
    # Gaussian smoothing along the closed contour with the given sigma
    return FishContour(scipy.ndimage.filters.gaussian_filter1d(contour.c,
                                                               smooth_sigma,
                                                               mode='wrap',
                                                               axis=0))
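# Quick illustrative check (a sketch, not used by the pipeline): on a
# synthetic ellipse the two largest curvature maxima lie at the ends of the
# major axis, so find_nose_tail should return two points close to
# (+a, 0) and (-a, 0).
if __name__ == '__main__':
    theta = np.linspace(0, 2 * np.pi, 400, endpoint=False)
    a, b = 60.0, 20.0
    ellipse = np.stack([a * np.cos(theta), b * np.sin(theta)], axis=1)
    nose, tail = FishContour(ellipse).find_nose_tail(SMOOTH_SIGMA)
    print("nose:", nose, "tail:", tail)  # both expected near (+/-60, 0)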
import numpy as np
from skimage.morphology import skeletonize
from scipy import interpolate
from fishmidline.fish_contour import FishContour
def get_binary_image(height, width, pxs):
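    # pxs holds the linear pixel indices of the blob; rebuild the
    # (height, width) boolean mask with those pixels set to True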
pxs = np.array(np.unravel_index(pxs, (height, width))).T
binary_image = np.zeros((height, width)).astype('bool')
binary_image[pxs[:, 0], pxs[:, 1]] = True
return binary_image
def get_nose_fish(cnt):
f_cnt = FishContour(cnt)
nose, _, _, _ = f_cnt.find_nose_and_orientation()
return nose
def distance(p1, p2):
return np.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
def order_midline(midline_x, midline_y, nose):
    # Order the skeleton points from nose to tail by greedy
    # nearest-neighbour chaining, starting at the nose
    points = list(zip(midline_x, midline_y))
    midline = []
    nose = tuple(nose)
    if nose in points:
        points.remove(nose)
    midline.append(nose)
    while len(midline) < len(midline_x):
        distances = [distance(p1, midline[-1]) for p1 in points]
        index = distances.index(np.min(distances))
        midline.append(points[index])
        points.remove(points[index])
    return zip(*midline)
def compute_equidistant_points(x, y, num_points=None,
                               dist_between_pts=None):
    # num_points is currently unused; points are placed every
    # dist_between_pts pixels of arc length along the interpolated midline
    tck, u = interpolate.splprep([x, y])
    unew = np.arange(0, 1.01, 0.001)
    x, y = interpolate.splev(unew, tck)
    tol = dist_between_pts
    i, idx = [0], [0]
    # Walk along the densely sampled spline and record an index every time
    # the accumulated arc length exceeds tol
while i[-1] < len(x):
total_dist = 0
for j in range(i[-1] + 1, len(x)):
total_dist += np.sqrt((x[j] - x[j - 1])**2 + (y[j] - y[j - 1])**2)
if total_dist > tol:
idx.append(j)
break
i.append(j + 1)
if i[-1] == i[-2]:
break
xn = x[idx]
yn = y[idx]
return xn, yn, x, y
def compute_angles_between_segments(midline_eq_x, midline_eq_y):
    def compute_angle(x0, x1, y0, y1):
        return np.arctan2(x1 - x0, y1 - y0)
    angles = [compute_angle(midline_eq_x[1], midline_eq_x[0],
                            midline_eq_y[1], midline_eq_y[0])]
    for i in range(1, len(midline_eq_x) - 2):
        angle = compute_angle(midline_eq_x[i + 1], midline_eq_x[i],
                              midline_eq_y[i + 1], midline_eq_y[i])
        # Unwrap so consecutive angles never differ by more than pi
        if angles[-1] - angle > np.pi:
            angle += 2 * np.pi
        elif angles[-1] - angle < -np.pi:
            angle -= 2 * np.pi
        angles.append(angle)
    # Express angles relative to the first (head) segment
    angles = np.asarray(angles) - angles[0]
    return angles
def get_midline_angles(video, blob, plot_flag=False,
distance_between_points=3,
debug=False):
# Get binary image of blob
binary_image = get_binary_image(video.height, video.width, blob.pixels)
# Skeletonize segmented image
skeleton = skeletonize(binary_image.astype(bool))
# Extract midline and order points in midline
midline_y, midline_x = np.where(skeleton == 1)
# Order midline from nose to tail
nose = get_nose_fish(blob.contour)
midline_x, midline_y = order_midline(midline_x, midline_y, nose)
    # Interpolate and compute equidistant points along the midline
midline_eq_x, midline_eq_y, midline_interp_x, midline_interp_y = \
compute_equidistant_points(midline_x, midline_y,
dist_between_pts=distance_between_points)
# Compute angles and midline
angles = compute_angles_between_segments(midline_eq_x, midline_eq_y)
if not plot_flag:
return angles
else:
return binary_image, skeleton, nose, blob.contour,\
(midline_x, midline_y), (midline_interp_x, midline_interp_y),\
(midline_eq_x, midline_eq_y),\
angles
import os
from tqdm import tqdm
import numpy as np
from fishmidline import get_midline_angles
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--session_path", help="path to the session folder\
create by idtracker.ai",
type=str)
parser.add_argument("-d", "--distance_between_points", help='distance\
between equidistant points in the mideline',
type=int,
default=3)
args = parser.parse_args()
dist_pts = args.distance_between_points
session_path = args.session_path
video_path = os.path.join(session_path, 'video_object.npy')
blobs_path = os.path.join(session_path, 'preprocessing',
'blobs_collection.npy')
print("Loading video information from {}".format(video_path))
    video = np.load(video_path, allow_pickle=True, encoding='latin1').item()
print("Loading list_of_blobs from {}".format(blobs_path))
    list_of_blobs = np.load(blobs_path, allow_pickle=True,
                            encoding='latin1').item()
num_frames = video.number_of_frames
    all_angles = np.full((num_frames, video.number_of_animals,
                          int(video.median_body_length // dist_pts)), np.nan)
len_angles = []
for frame_number in tqdm(range(num_frames), desc='computing...'):
blobs_in_frame = list_of_blobs.blobs_in_video[frame_number]
for blob in blobs_in_frame:
if blob.is_an_individual:
angles = get_midline_angles(video, blob,
distance_between_points=dist_pts)
len_angles.append(len(angles))
                n_angles = min(len(angles), all_angles.shape[2])
                all_angles[frame_number, blob.identity - 1,
                           :n_angles] = angles[:n_angles]
import os
import numpy as np
import matplotlib.pyplot as plt
from fishmidline import get_midline_angles
def plotter(ax_arr, binary_image, skeleton, nose, contour,
midline_xy, midline_interp_xy, midline_eq_xy):
x_lim = [np.min(np.where(binary_image)[1]) - 1,
np.max(np.where(binary_image)[1]) + 1]
y_lim = [np.min(np.where(binary_image)[0]) - 1,
np.max(np.where(binary_image)[0]) + 1]
# Plot binary image
ax_arr[0, 0].imshow(binary_image, cmap='gray')
ax_arr[0, 0].set_title('Segmented blob in binary full frame')
# Plot zoomed binary image
ax_arr[0, 1].imshow(binary_image, cmap='gray')
ax_arr[0, 1].set_xlim(x_lim), ax_arr[0, 1].set_ylim(y_lim)
ax_arr[0, 1].invert_yaxis()
ax_arr[0, 1].set_title('(Zoom)')
# Plot skeleton and nose
ax_arr[0, 2].imshow(skeleton, cmap='gray')
ax_arr[0, 2].set_xlim(x_lim), ax_arr[0, 2].set_ylim(y_lim)
ax_arr[0, 2].invert_yaxis()
ax_arr[0, 2].set_title('Skeleton from\nbinary image')
# Plot contour and nose
contour = np.squeeze(np.asarray(contour))
ax_arr[0, 3].plot(contour[:, 0], contour[:, 1])
ax_arr[0, 3].plot(nose[0], nose[1], 'ro')
ax_arr[0, 3].set_xlim(x_lim), ax_arr[0, 3].set_ylim(y_lim)
ax_arr[0, 3].invert_yaxis()
ax_arr[0, 3].set_aspect('equal')
ax_arr[0, 3].set_title('Contour and\nfish nose')
# Plot midline points
ax_arr[1, 0].plot(midline_xy[0], midline_xy[1], '.')
ax_arr[1, 0].invert_yaxis()
ax_arr[1, 0].set_aspect('equal')
ax_arr[1, 0].set_title('Skeleton points\nincluding nose')
# Plot interpolated midline
ax_arr[1, 1].plot(midline_interp_xy[0], midline_interp_xy[1], '-')
ax_arr[1, 1].invert_yaxis()
ax_arr[1, 1].set_aspect('equal')
ax_arr[1, 1].set_title('Skeleton\ninterpolated')
# Plot equidistant midline
ax_arr[1, 2].plot(midline_eq_xy[0], midline_eq_xy[1], '-o')
ax_arr[1, 2].invert_yaxis()
ax_arr[1, 2].set_aspect('equal')
ax_arr[1, 2].set_title('Equidistant points\nin skeleton')
    # Plot binary_image with overlaid midline
ax_arr[1, 3].imshow(binary_image, cmap='gray')
ax_arr[1, 3].plot(midline_eq_xy[0], midline_eq_xy[1], '-o')
ax_arr[1, 3].set_xlim(x_lim), ax_arr[1, 3].set_ylim(y_lim)
ax_arr[1, 3].invert_yaxis()
ax_arr[1, 3].set_title('Fish midline')
if __name__ == '__main__':
plt.ion()
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--session_path", help="path to the session folder\
create by idtracker.ai",
type=str)
args = parser.parse_args()
session_path = args.session_path
video_path = os.path.join(session_path, 'video_object.npy')
blobs_path = os.path.join(session_path, 'preprocessing',
'blobs_collection.npy')
print("Loading video information from {}".format(video_path))
    video = np.load(video_path, allow_pickle=True, encoding='latin1').item()
print("Loading list_of_blobs from {}".format(blobs_path))
    list_of_blobs = np.load(blobs_path, allow_pickle=True,
                            encoding='latin1').item()
FOCAL_IDENTITY = 1
    # Plot one randomly chosen frame from the first half of the video
    for i in range(1):
FRAME_NUMBER = np.random.randint(0, video.number_of_frames // 2)
blobs_in_frame = list_of_blobs.blobs_in_video[FRAME_NUMBER]
identities = [blob.identity for blob in blobs_in_frame]
print(identities)
if FOCAL_IDENTITY in identities:
blob = [blob for blob in blobs_in_frame
if blob.identity == FOCAL_IDENTITY][0]
binary_image, skeleton, nose, contour,\
midline_xy, midline_interp_xy, midline_eq_xy, _ = \
get_midline_angles(video, blob, plot_flag=True)
            # Plot images
fig, ax_arr = plt.subplots(2, 4, figsize=(15, 10))
plotter(ax_arr, binary_image, skeleton, nose, contour,
midline_xy, midline_interp_xy, midline_eq_xy)
else:
print("No identity in this frame")
plt.show()
from setuptools import setup
setup(name='fishmidline',
version='0.1',
description='fishmidline',
url='https://gitlab.com/polavieja_lab/midline',
author='Francisco Romero-Ferrero',
author_email='[email protected]',
license='GPL',
packages=['fishmidline'],
zip_safe=False)