author | sotech117 <michael_foiani@brown.edu> | 2023-12-14 21:04:24 -0500
committer | sotech117 <michael_foiani@brown.edu> | 2023-12-14 21:04:24 -0500
commit | b66e659ba40b84dbf75e09d5463e2aef1a39b718 (patch)
tree | fb6baf8b6cfe86f80c828bc047e4e06d09904f01
parent | 25c3b38b708bf467c40303083190409293452a33 (diff)
make Recv.py file to read in input
-rw-r--r-- | Recv.py | 53
-rw-r--r-- | Sender.py | 44
-rw-r--r-- | __pycache__/utils.cpython-38.pyc | bin | 1782 -> 2542 bytes
-rw-r--r-- | utils.py | 59
-rw-r--r-- | visualize.py | 35
5 files changed, 130 insertions, 61 deletions
diff --git a/Recv.py b/Recv.py
new file mode 100644
--- /dev/null
+++ b/Recv.py
@@ -0,0 +1,53 @@
+import struct
+
+import numpy as np
+import pyaudio
+import threading
+from utils import *
+
+
+class Recv:
+    def __init__(self, start_freq=19500):
+        self.start_freq = start_freq
+        self.freq_range = 500
+        self.sampling_rate = 44100
+        self.p = pyaudio.PyAudio()
+        self.bytes_per_transmit = 1
+
+
+        # TODO: use stream to send back the data
+        self.CHUNK = 2048 * 2
+        self.FORMAT = pyaudio.paInt32
+        self.CHANNELS = 1
+        self.RATE = 44100
+        self.pause = False
+        # stream object
+        self.p = pyaudio.PyAudio()
+        self.stream = self.p.open(
+            format=self.FORMAT,
+            channels=self.CHANNELS,
+            rate=self.RATE,
+            input=True,
+            output=True,
+            frames_per_buffer=self.CHUNK,
+        )
+
+    def read_audio_stream(self):
+        data = self.stream.read(self.CHUNK)
+        data_int = struct.unpack(str(self.CHUNK) + 'i', data)
+        return data_int
+
+    def listen(self):
+        while True:
+            data = self.read_audio_stream()
+            recv_freq_range = self.freq_range / 2
+            wave_to_bits(data, self.start_freq, recv_freq_range, self.bytes_per_transmit)
+
+
+def main():
+    recv = Recv()
+    recv.listen()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Sender.py b/Sender.py
--- a/Sender.py
+++ b/Sender.py
@@ -11,9 +11,12 @@ Play a single frequency.
 :param duration: Duration of the sound in seconds.
 :param samplingRate: Sampling rate in Hz.
 """
+
+
 def play_frequency(freq, amplitude, duration=1.0, samplingRate=44100, p=None):
     # Generate sample for the given frequency as a float32 array
-    samples = (amplitude * np.sin(2*np.pi*np.arange(samplingRate*duration)*freq/samplingRate)).astype(np.float32).tobytes()
+    samples = (amplitude * np.sin(2 * np.pi * np.arange(samplingRate * duration) * freq / samplingRate)).astype(
+        np.float32).tobytes()

     # Open stream
     stream = p.open(format=pyaudio.paFloat32,
@@ -37,6 +40,8 @@ Use threads to play multiple frequencies simultaneously.
 :param duration: Duration of the sound in seconds.
 :param samplingRate: Sampling rate in Hz.
 """
+
+
 def play_frequencies_separately(freq_map, duration=1.0, samplingRate=44100):
     p = pyaudio.PyAudio()

@@ -61,6 +66,8 @@ def play_frequencies_separately(freq_map, duration=1.0, samplingRate=44100):
 :param data: A string of characters.
 :return: A list of binary strings.
 """
+
+
 def string_to_binary(data):
     data_list = []
     for char in data:
@@ -68,10 +75,13 @@ def string_to_binary(data):
         data_list.append(binary_representation)
     return data_list

-# transmit string
+
+# transmit string
 """
 :param data: A string of characters.
 """
+
+
 def transmit_string(data):
     data_list = string_to_binary(data)

@@ -81,17 +91,20 @@ def transmit_string(data):
         for j in range(len(data_list[i])):
             if data_list[i][j] == "0":
                 freq_map[start_freq + j * 250] = 0.0
-
+
             if data_list[i][j] == "1":
                 freq_map[start_freq + j * 250] = 1.0
-
+
     # print(freq_map)
     play_frequencies_separately(freq_map, duration=1000)

+
 """
 :param data: A list of peak frequencies.
 return: A string of characters.
 """
+
+
 def receive_string(data, start_freq=18000, freq_step=250):
     binary = ['0'] * 8

@@ -105,6 +118,7 @@ def receive_string(data, start_freq=18000, freq_step=250):
     except ValueError:
         return "Error: Invalid binary data"

+
 # Example usage
 # data for the letter h
 # 01101000
@@ -117,36 +131,33 @@ print(decoded_string)


 class LinkLayer:
-    def __init__(self, start_freq=19800):
+    def __init__(self, start_freq=19500):
         self.start_freq = start_freq
-        self.freq_range = 200
+        self.freq_range = 500
         self.sampling_rate = 44100
         self.p = pyaudio.PyAudio()
         self.isReceiving = False
         self.isEstablished = False
         self.bytes_per_transmit = 1
+        self.stream = self.p.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=True)

     def transmit_string(self, data):
         data_list = string_to_binary(data)
-        play_data(data_list, self.start_freq, self.freq_range, self.bytes_per_transmit, self.p)
-
+        send_freq_range = self.freq_range / 2
+        play_data(data_list, self.start_freq, send_freq_range, self.bytes_per_transmit, self.stream)
+
     def send_data(self):
         while True:
             if not self.isReceiving:
                 user_input = input("Enter data to send: ")
                 if user_input == "exit" or user_input == "q":
+                    self.stream.stop_stream()
+                    self.stream.close()
                     break
                 self.transmit_string(user_input)
             else:
                 print("Currently receiving data, please wait...")
-
-# take in range width, the number of bytes, and the bytes themselves, and starting freq
-
-# cmdline args: data, start freq, bytes per transmit, frequency range
-# 18500, 1000 range
-
-# vlistener takes in no data.


 def main():
     link_layer = LinkLayer()
@@ -157,5 +168,6 @@ def main():
     # Start the threads
     send_thread.start()

+
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/__pycache__/utils.cpython-38.pyc b/__pycache__/utils.cpython-38.pyc
index 2511512..01f5eb4 100644
--- a/__pycache__/utils.cpython-38.pyc
+++ b/__pycache__/utils.cpython-38.pyc
Binary files differ
diff --git a/utils.py b/utils.py
--- a/utils.py
+++ b/utils.py
@@ -4,6 +4,38 @@
 import numpy as np
 import pyaudio
 import threading
+from scipy.fftpack import fft
+
+
+def wave_to_bits(wave, starting_freq, freq_range, bytes_per_transmit, chunk=4096, rate=44100):
+    spectrum = fft(wave)
+    spectrum = np.abs(spectrum)
+    spectrum = spectrum / (np.linalg.norm(spectrum) + 1e-16)
+
+    # FIXME: update to self values, given if ur a sender or receiver
+    starting_freq = starting_freq
+    end_freq = starting_freq + freq_range
+    freq_to_index_ratio = (chunk - 1) / rate
+
+    # only accept the scaled spectrum from our starting range to 20000 Hz
+    starting_range_index = int(starting_freq * freq_to_index_ratio)
+    ending_range_index = int(end_freq * freq_to_index_ratio)
+    restricted_spectrum = spectrum[starting_range_index:ending_range_index + 1]
+
+    # get the n indices of the max peaks of amplitude greater than .125, within our confined spectrum
+    indices = np.argwhere(restricted_spectrum > .125)
+
+    freqs = [int((indices[i] + starting_range_index) / freq_to_index_ratio) for i in range(len(indices))]
+
+    # convert the frequencies to bits
+    data = frequencies_to_bits(freqs, calculate_send_frequencies(starting_freq, freq_range, bytes_per_transmit))
+
+    # TODO: remove
+    byte = data[:8]
+    if data[-1] == '1':
+        receive_string(byte)
+
+    return data


 def calculate_send_frequencies(start_freq, freq_range, bytes_per_transmit):
@@ -15,49 +47,50 @@ def calculate_send_frequencies(start_freq, freq_range, bytes_per_transmit):
         f = int(start_freq + (i + 1) * freq_interval)
         freq_list.append(f)

-    print(freq_list)
-
     return freq_list

-def frequencies_to_bytes(frequencies, expected_freqs):
+def frequencies_to_bits(frequencies, expected_freqs):
     # get the interval between frequencies, so we can clamp the range around them
     freq_interval = expected_freqs[1] - expected_freqs[0]
     plus_minus = freq_interval // 2

-    byte_list = ['0'] * len(expected_freqs)
+    bit_list = ['0'] * len(expected_freqs)
     for freq in frequencies:
         for i in range(len(expected_freqs)):
             # clamp the range around the frequency to the frequency
             if expected_freqs[i] - plus_minus <= freq < expected_freqs[i] + plus_minus:
-                byte_list[i] = '1'
+                bit_list[i] = '1'

-    return byte_list
+    return bit_list

-def play_data(data, start_freq, freq_step, bytes_per_transmit, p):
+
+def play_data(data, start_freq, freq_step, bytes_per_transmit, stream):
     freq_list = calculate_send_frequencies(start_freq, freq_step, bytes_per_transmit)
+    send_duration = 1.0
+
+    flip_flag = 0  # TODO: make this global between plays
     for byte in data:
+        byte = byte + str(flip_flag) + '1'
         print(byte)
         samples = None
         for i, bit in enumerate(byte):
             if bit == '1':
                 print(freq_list[i])
-                s = .125 * np.sin(2 * np.pi * np.arange(44100 * 10.0) * freq_list[i] / 44100)
+                s = .125 * np.sin(2 * np.pi * np.arange(44100 * send_duration) * freq_list[i] / 44100)
                 if samples is None:
                     samples = s
                 else:
                     samples = np.add(samples, s)

         if samples is not None:
-            print(samples)
-            stream = p.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=True)
             stream.write(samples.astype(np.float32).tobytes())
-            stream.stop_stream()
-            stream.close()
+        flip_flag = (flip_flag + 1) % 2
+

 def receive_string(binary):
     binary_string = ''.join(binary)
     try:
         print(chr(int(binary_string, 2)))
     except ValueError:
-        print("Error: Invalid binary data")
\ No newline at end of file
+        print("Error: Invalid binary data")
diff --git a/visualize.py b/visualize.py
index 759cf34..23016a0 100644
--- a/visualize.py
+++ b/visualize.py
@@ -45,37 +45,8 @@ class Test(object):
         scaled_spectrum = np.abs(spectrum)
         scaled_spectrum = scaled_spectrum / (np.linalg.norm(scaled_spectrum) + 1e-16)

-        # FIXME: update to self values, given if ur a sender or receiver
-        starting_freq = 19800
-        end_freq = 20000
-        freq_to_index_ratio = self.CHUNK / self.RATE
-        # only accept the scaled spectrum from our starting range to 20000 Hz
-        starting_range_index = int(starting_freq * freq_to_index_ratio)
-        ending_range_index = int(end_freq * freq_to_index_ratio)
-        print(starting_freq, end_freq, starting_range_index, ending_range_index)
-        restricted_spectrum = scaled_spectrum[starting_range_index:ending_range_index + 1]
-
-        # normalize the restricted spectrum
-        indices = np.argwhere(restricted_spectrum > .125)
-        print(indices)
-
-        freqs = [int((indices[i] + starting_range_index) / freq_to_index_ratio) for i in range(len(indices))]
-        print(freqs)
-
-        p = u.frequencies_to_bytes(freqs, u.calculate_send_frequencies(19800, 200, 1))
-        data = p[:8]
-        print(data)
-        u.receive_string(data)
-
-        # get the n indices of the max peaks, within our confined spectrum
-        # FIXME: update to self values
-        bytes = 1
-        num_bits = bytes * 8 + 2
-        if num_bits > len(restricted_spectrum):
-            print("ERROR: num_bits > len(restricted_spectrum)")
-
-        # print(index_to_freq[max_index], max_index, max_index * self.RATE / (self.CHUNK - 1))
-        return freqs, scaled_spectrum
+        # get the index of the max
+        return scaled_spectrum

     def read_audio_stream(self):
         data = self.stream.read(self.CHUNK)
@@ -86,7 +57,7 @@ class Test(object):
         self.init_plots()
         while not self.pause:
             waveform = self.read_audio_stream()
-            freq_max, scaled_spectrum = self.get_fundamental_frequency(waveform)
+            scaled_spectrum = self.get_fundamental_frequency(waveform)

             # update figure canvas if wanted
             if graphics:
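For anyone trying this change locally, the sketch below shows one assumed way to exercise the new receive path against the existing sender. It is not part of the commit: the file name smoke_test.py, the run-from-repository-root assumption, and the assumption that Sender.py exposes the LinkLayer class shown above are illustrative only.

```python
# smoke_test.py -- hypothetical helper, not part of this repository.
# Assumes it is run from the repository root with PyAudio installed,
# so that Recv.py and Sender.py (as shown in the diff above) import cleanly.
import sys

from Recv import Recv            # receiver added in this commit
from Sender import LinkLayer     # sender class modified in this commit (assumed location)


def main():
    if len(sys.argv) > 1 and sys.argv[1] == "recv":
        # Open a microphone stream and keep decoding bits from the
        # 19.5-20 kHz band (blocks forever; stop with Ctrl+C).
        Recv().listen()
    else:
        # Prompt for text and play it as per-bit tones; type "q" to stop.
        LinkLayer().send_data()


if __name__ == "__main__":
    main()
```

Running the receiver in one terminal (`python smoke_test.py recv`) and the sender in another (`python smoke_test.py`), then typing a single character at the sender prompt, should, if the audio path cooperates, cause the receiver to print that character once a complete frame is detected.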