diff --git a/ambilight.py b/ambilight.py
new file mode 100644
index 0000000..530c27d
--- /dev/null
+++ b/ambilight.py
@@ -0,0 +1,109 @@
+import pyautogui
+import scipy.cluster as cluster
+import scipy
+import sys
+import numpy as np
+from time import sleep, time
+import cupy
+import signal
+import atexit
+from time import perf_counter as timestamp
+from telemetrix_rpi_pico import telemetrix_rpi_pico
+import threading
+import PIL
+
+def sigint_handler(signal=None, frame=None):
+    print('KeyboardInterrupt caught, clearing LEDs and shutting down')
+    board.neopixel_clear()
+    sleep(0.75)
+    board.shutdown()
+    sys.exit(0)
+signal.signal(signal.SIGINT, sigint_handler)
+atexit.register(sigint_handler)
+global board
+board = telemetrix_rpi_pico.TelemetrixRpiPico()
+
+FAST_MODE = True
+def find_dorminant_color(im):
+    if FAST_MODE:  # fast path: mean colour of the region, computed on the GPU with cupy
+        color = cupy.reshape(cupy.asarray(im), (-1, 3))
+        color = cupy.mean(color, axis=0)
+
+        return color
+    else:  # slow path: k-means clustering, return the most common cluster centre
+        NUM_CLUSTERS = 5
+
+        im = im.resize((150, 150))
+        ar = np.asarray(im)
+        shape = ar.shape
+        ar = ar.reshape(np.prod(shape[:2]), shape[2]).astype(float)
+        codes, dist = cluster.vq.kmeans(ar, NUM_CLUSTERS)
+
+        vecs, dist = cluster.vq.vq(ar, codes)          # assign each pixel to a cluster
+        counts, bins = np.histogram(vecs, len(codes))  # count pixels per cluster
+
+        index_max = np.argmax(counts)
+        peak = codes[index_max]
+
+        return peak
+
+global NUM_LEDS, BOARDER_SIZE, TOP_LEDS, RIGHT_LEDS, BUTTOM_LEDS, LEFT_LEDS, screenshot
+
+NUM_LEDS = 60       # total pixels on the strip
+BOARDER_SIZE = 300  # depth in pixels of the screen border sampled for each edge
+TOP_LEDS = 19       # LEDs along each screen edge
+RIGHT_LEDS = 11
+BUTTOM_LEDS = 17
+LEFT_LEDS = 10
+
+board.set_pin_mode_neopixel(pin_number=2, num_pixels=NUM_LEDS)
+board.neopixel_clear(auto_show=True)
+
+lastTime = timestamp()
+
+def processTopLeds():
+    global NUM_LEDS, BOARDER_SIZE, TOP_LEDS, RIGHT_LEDS, BUTTOM_LEDS, LEFT_LEDS, board, screenshot
+    while True:
+        top = screenshot.crop(box=[0, 0, size.width, BOARDER_SIZE])  # re-crop from the latest screenshot each frame
+        for i in range(0, TOP_LEDS):
+            segment = top.crop(box=[i * size.width / TOP_LEDS, 0, (i + 1) * size.width / TOP_LEDS, BOARDER_SIZE])
+            colors = [int(color) for color in find_dorminant_color(segment)]
+            board.neo_pixel_set_value(i, r=colors[0], g=colors[1], b=colors[2])
+        board.neopixel_show()
+def processLeftLeds():
+    global NUM_LEDS, BOARDER_SIZE, TOP_LEDS, RIGHT_LEDS, BUTTOM_LEDS, LEFT_LEDS, board, screenshot
+    while True:
+        left = screenshot.crop(box=[0, 0, BOARDER_SIZE, size.height])
+        for i in range(0, LEFT_LEDS):
+            segment = left.crop(box=[0, i * size.height / LEFT_LEDS, BOARDER_SIZE, (i + 1) * size.height / LEFT_LEDS])
+            colors = [int(color) for color in find_dorminant_color(segment)]
+            board.neo_pixel_set_value(TOP_LEDS + RIGHT_LEDS + BUTTOM_LEDS + LEFT_LEDS - i, r=colors[0], g=colors[1], b=colors[2])
+        board.neopixel_show()
+def processButtomLeds():
+    global NUM_LEDS, BOARDER_SIZE, TOP_LEDS, RIGHT_LEDS, BUTTOM_LEDS, LEFT_LEDS, board, screenshot
+    while True:
+        buttom = screenshot.crop(box=[0, size.height - BOARDER_SIZE, size.width, size.height])
+        for i in range(0, BUTTOM_LEDS):
+            segment = buttom.crop(box=[i * size.width / BUTTOM_LEDS, 0, (i + 1) * size.width / BUTTOM_LEDS, BOARDER_SIZE])
+            colors = [int(color) for color in find_dorminant_color(segment)]
+            board.neo_pixel_set_value(TOP_LEDS + RIGHT_LEDS + BUTTOM_LEDS - i, r=colors[0], g=colors[1], b=colors[2])
+        board.neopixel_show()
+def processRightLeds():
+    global NUM_LEDS, BOARDER_SIZE, TOP_LEDS, RIGHT_LEDS, BUTTOM_LEDS, LEFT_LEDS, board, screenshot
+    while True:
+        right = screenshot.crop(box=[size.width - BOARDER_SIZE, 0, size.width, size.height])
+        for i in range(0, RIGHT_LEDS):
+            segment = right.crop(box=[0, i * size.height / RIGHT_LEDS, BOARDER_SIZE, (i + 1) * size.height / RIGHT_LEDS])
+            colors = [int(color) for color in find_dorminant_color(segment)]
+            board.neo_pixel_set_value(i + TOP_LEDS, r=colors[0], g=colors[1], b=colors[2])
+        board.neopixel_show()
+screenshot = pyautogui.screenshot()
+size = pyautogui.size()
+threading.Thread(target=processTopLeds).start()
+threading.Thread(target=processLeftLeds).start()
+threading.Thread(target=processRightLeds).start()
+threading.Thread(target=processButtomLeds).start()
+while True:
+    screenshot = pyautogui.screenshot()  # keep grabbing fresh frames for the worker threads to crop
+
+
diff --git a/beat_detection.py b/beat_detection.py
new file mode 100644
index 0000000..8caaebf
--- /dev/null
+++ b/beat_detection.py
@@ -0,0 +1,24 @@
+import scipy.signal as signal
+import pyaudio
+from matplotlib import pyplot as plt
+import numpy as np
+from time import sleep
+
+SAMPLE_SIZE = 1024
+
+audio = pyaudio.PyAudio()
+audioStream = audio.open(format=pyaudio.paInt16, channels=1, rate=1000, input=True, frames_per_buffer=SAMPLE_SIZE)
+
+while True:
+    data = audioStream.read(SAMPLE_SIZE)
+    sample = np.frombuffer(data, dtype=np.int16)
+
+    # plot data
+    plt.plot(sample)
+    plt.show()  # blocks until the plot window is closed
+    freqdom_signal = signal.stft(sample)  # (frequencies, times, STFT matrix)
+    print(freqdom_signal)
+# close stream
+audioStream.stop_stream()
+audioStream.close()
+audio.terminate()
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..5414ecf
--- /dev/null
+++ b/test.py
@@ -0,0 +1,7 @@
+import cupy
+a = cupy.asarray([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]]])
+print(a)
+a = cupy.reshape(a, (-1, 3))
+print(a)
+a = cupy.median(a, axis=0)  # per-column (per-channel) median
+print(a)
\ No newline at end of file