Commit 2ec0e50a authored by Jackson Garner

More vignette stuff and prerendering

parent db9a35ca
import numpy as np


def fftToImage(a, image):
    start = 0
    for i in a:
        if i < .2:
            start += 1
        else:
            break
    a = a[start:a.size - start]
    start = np.array([0])
    a -= 0.2
    a = a.clip(0, 1000)
    start = np.nonzero(a)[0]
    i = 0
    jump = a.size / image.shape[1] * 10
    if a.size != 0:
        if start.size != 0:
            a = a[start[0]:a.size - start[0]]
            jump = a.size / image.shape[1] * 10
        # ins = np.linspace(0, a.size - 1, image.size / 10)
        # ins = np.repeat(ins, 10)
        # ins = ins.astype(int)
        # image = image.transpose(1, 0, 2)
        # image[:] = a[ins].reshape((1280, 720, 3))
        # image = image.transpose(1, 0, 2)
        for p in image.transpose(1, 0, 2):
            p[:] = a[int(int(i)*jump)] * 10
            i += 0.1
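The function trims low-magnitude bins off both ends of a magnitude spectrum and then paints a single scaled bin value into every pixel of each image column, stepping to a new spectrum bin every ten columns. A minimal sketch of calling it the way the main script below does, assuming the definition above is in scope (the main script imports it as the fftToImage module and calls fftToImage.fftToImage); the 440 Hz test tone, 0.01 s window, and sample rate are illustrative stand-ins for one timestep of wav_data:

import numpy as np

rate = 44100                                    # illustrative sample rate (the main script sets fs = 44100)
t = np.linspace(0, 0.01, int(rate * 0.01), endpoint=False)
second = np.sin(2 * np.pi * 440 * t)            # stand-in for one timestep slice of wav_data

image = np.ones((720, 1280, 3))                 # same buffer shape the main script allocates
a = np.abs(np.fft.fftshift(np.fft.fft(second))) # shifted magnitude spectrum, as in the main loop
fftToImage(a, image)                            # each image column now holds one scaled FFT bin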
......
-import matplotlib
-matplotlib.use('TkAgg')
+# import matplotlib
+# matplotlib.use('TkAgg')
import time
-import matplotlib.pyplot as plt
import cProfile
+# import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.io.wavfile as wavfile
@@ -24,6 +25,7 @@ load_from_file = False
file_name = None
wave_type = 0
fs=44100
+playing = False
def mkBounds(name):
@@ -61,6 +63,7 @@ else:
except:
    load_from_file = True
    file_name = sys.argv[1]
+    playing = sys.argv[2] == "-p"
if load_from_file == False:
    wav_data, rate = generate_waves.generate_waves(wave_type, 52)
@@ -71,9 +74,9 @@ elif load_from_file == True:
    boundaries, labels = mkBounds(sys.argv[1])
    beat_times = mkBeat(sys.argv[1])
-plt.ion()
-fig = plt.figure()
-fft_plot = fig.add_subplot(121)
+# plt.ion()
+# fig = plt.figure()
+# fft_plot = fig.add_subplot(121)
#image = fig.add_subplot(122)
image = np.ones((720, 1280, 3))
@@ -81,20 +84,37 @@ image = np.ones((720, 1280, 3))
sounddevice.play(wav_data, rate)
timestep = .01
t = time.time()
origin = time.time()
nextBeat = 0
-cap = cv2.VideoCapture('snow.webm')
-timestep = 1/cap.get(cv2.CAP_PROP_FPS)
+if playing:
+    cap = cv2.VideoCapture('outpy.avi')
+else:
+    cap = cv2.VideoCapture('snow.webm')
+fps = cap.get(cv2.CAP_PROP_FPS)
+timestep = 1/fps
+print(timestep)
is_beat = False
beat_distance = 0
+# Default resolutions of the frame are obtained. The default resolutions are system dependent.
+# We convert the resolutions from float to integer.
+frame_width = int(cap.get(3))
+frame_height = int(cap.get(4))
+# Define the codec and create a VideoWriter object. The output is stored in the 'outpy.avi' file.
+if not playing:
+    out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (frame_width, frame_height))
+frame_at = -1
while t - origin < len(wav_data) / rate:
+    frame_at += 1
    ret, frame = cap.read()
    if load_from_file == True:
        try:
-            if time.time() - origin >= beat_times[nextBeat]:
+            if (frame_at * timestep) >= beat_times[nextBeat]:
                nextBeat += 1
                # print(f"Beat {t - origin}")
                is_beat = True
@@ -103,50 +123,59 @@ while t - origin < len(wav_data) / rate:
# print("Last beat encountered")
while (time.time() - t < timestep):
pass
t = time.time()
beat_distance = beat_times[nextBeat] - time.time() + origin
second = wav_data[math.floor((t - origin) * rate) : math.floor((t - origin + timestep) * rate)]
transform = np.abs(np.fft.fft(second[:]))
plot_transform = np.fft.fftshift(transform)
# print(ImageFunctions.getBaseWeight(transform))
max_val = np.amax(transform)
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_frame)
#v = np.where(True, 255 * (350 - max_val) / 350, 255)
#v *= math.floor((350 - max_val) / 350)
np.multiply(s, (((350 - max_val) / 350) + 0.5*5)/6, out=s, casting='unsafe')
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
#image.xticks([])
#image.ytacks([])
#image.show()
a = np.abs(np.fft.fftshift(np.fft.fft(second[:])))
fftToImage.fftToImage(a, image)
#cv2.imshow("t", (img * (1/255) + image).clip(0, 1))
preV = (img * (1/255)).clip(0, 1)
row,cols,rgb = img.shape
a = cv2.getGaussianKernel(cols, 350.0 / (beat_distance))
b = cv2.getGaussianKernel(row, 350.0 / (beat_distance))
c = b*a.T
d = c/c.max()
d = np.array([d, d, d]).transpose(1, 2, 0) + image
preV = preV * ( d)
cv2.imshow('frame', preV)
cv2.waitKey(1)
# cv2.imshow('frame', img)
# cv2.waitKey(1)
peaks, _ = signal.find_peaks(transform, prominence=20)
is_beat = False
plot = False
+    # while (time.time() - t < timestep):
+    # pass
+    if not playing:
+        t = time.time()
+        beat_distance = beat_times[nextBeat] - frame_at * timestep
+        second = wav_data[math.floor((frame_at * timestep) * rate) : math.floor(((frame_at + 1) * timestep) * rate)]
+        transform = np.abs(np.fft.fft(second[:]))
+        plot_transform = np.fft.fftshift(transform)
+        # print(ImageFunctions.getBaseWeight(transform))
+        max_val = np.amax(transform)
+        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+        h, s, v = cv2.split(hsv_frame)
+        #v = np.where(True, 255 * (350 - max_val) / 350, 255)
+        #v *= math.floor((350 - max_val) / 350)
+        np.multiply(s, (((350 - max_val) / 350) + 0.5*5)/6, out=s, casting='unsafe')
+        final_hsv = cv2.merge((h, s, v))
+        img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
+        #image.xticks([])
+        #image.ytacks([])
+        #image.show()
+        a = np.abs(np.fft.fftshift(np.fft.fft(second[:])))
+        fftToImage.fftToImage(a, image)
+        #cv2.imshow("t", (img * (1/255) + image).clip(0, 1))
+        preV = (img * (1/255)).clip(0, 1)
+        row,cols,rgb = img.shape
+        a = cv2.getGaussianKernel(cols, 350.0 / (beat_distance))
+        b = cv2.getGaussianKernel(row, 350.0 / (beat_distance))
+        c = b*a.T
+        d = c/c.max()
+        d = np.array([d, d, d]).transpose(1, 2, 0) + image
+        preV = preV * ( d)
+        cv2.imshow('frame', preV)
+        # preV = cv2.cvtColor(preV, cv2.CV_8UC1)
+        out.write(np.uint8(preV * 170))
+        cv2.waitKey(1)
+        # cv2.imshow('frame', img)
+        # cv2.waitKey(1)
+        peaks, _ = signal.find_peaks(transform, prominence=20)
+        is_beat = False
+        plot = False
+    else:
+        cv2.imshow('frame', frame)
+        cv2.waitKey(1)
+        plot = False
    if plot is True:
        print(peaks)
@@ -163,3 +192,4 @@ while t - origin < len(wav_data) / rate:
        fig.canvas.draw()
        fig.canvas.flush_events()
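The vignette in the render branch above is the outer product of two 1-D Gaussian kernels whose sigma is 350.0 / beat_distance, so the mask is tight (dark edges) right after a beat and flattens out as the next beat approaches; the script then adds the fftToImage buffer into the mask before multiplying it onto the frame. A standalone sketch of just the mask construction, assuming OpenCV and NumPy, with an illustrative 1280x720 frame and a 0.5 s beat_distance (the FFT-image term is omitted here):

import cv2
import numpy as np

frame_h, frame_w = 720, 1280
beat_distance = 0.5                              # seconds until the next beat (illustrative value)

# 1-D Gaussian kernels; a smaller beat_distance gives a larger sigma and a flatter, brighter mask
col_kernel = cv2.getGaussianKernel(frame_w, 350.0 / beat_distance)
row_kernel = cv2.getGaussianKernel(frame_h, 350.0 / beat_distance)

mask = row_kernel * col_kernel.T                 # (720, 1280) outer product
mask = mask / mask.max()                         # normalise so the centre of the frame is 1.0
mask = np.dstack([mask, mask, mask])             # one copy per colour channel

frame = np.full((frame_h, frame_w, 3), 0.8)      # stand-in for the normalised video frame
vignetted = (frame * mask).clip(0, 1)            # darkened toward the corners

In the script itself the mask is rebuilt every frame from the current beat_distance, which is what makes the vignette pulse with the beat grid.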