from theonerig.synchro.io import *
from theonerig.core import *
from theonerig.utils import *
import matplotlib.pyplot as plt
photodiode_data = load_adc_raw("./files/basic_synchro/photodiode_data", sampling_rate=30000)
Supposedly, get_thresholds should provide the low and high thresholds for the data, but the low threshold is a sensitive value that should be checked manually for each record.
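As a quick sketch of how that check could look (the exact signature of get_thresholds is assumed here, so verify it against the library before relying on it):
# Assumed usage: get_thresholds(data) returning a (low, high) pair.
# Plot the data and confirm the low threshold by eye before trusting it.
low_threshold, high_threshold = get_thresholds(photodiode_data)
print(low_threshold, high_threshold)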
get_first_high finds the index of the first frame higher than the threshold.
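For example, a minimal sketch assuming get_first_high takes the data followed by the threshold value:
# Assumed signature: get_first_high(data, threshold) -> index of the first sample above threshold
first_high_idx = get_first_high(photodiode_data, 1000)
print(first_high_idx)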
detect_frames does the frame detection. It works for camera pulses and for photodiode data emitted by a DLP. It proceeds by:
- Finding the first frame higher than a threshold
- Detecting the frames before it if the do_reverse flag is set to True
- Detecting the frames after it
- Assigning each frame a binary value depending on whether it is higher than the high threshold
- Running a quick check on the frames to spot odd results
frame_timepoints, frame_signals = detect_frames(photodiode_data, 200, 1000, increment=500)
plt.figure()
plt.plot(photodiode_data)
plt.scatter(frame_timepoints, frame_signals*800+600, c="r")
The frame signals are then refined with cluster_frame_signals, which clusters the signal values to attribute each frame a value within a defined range.
frame_signals = cluster_frame_signals(photodiode_data, frame_timepoints, n_cluster=5)
plt.figure()
plt.plot(photodiode_data[120000:131800])
plt.scatter(frame_timepoints[frame_timepoints>120000]-120000, frame_signals[frame_timepoints>120000]*200+200, c='r')
With the frames detected, we can create our record master, often named reM.
ref_timepoints, ref_signals = extend_sync_timepoints(frame_timepoints, frame_signals, up_bound=len(photodiode_data))
reM = RecordMaster([(ref_timepoints, ref_signals)])
print(len(reM[0]))
However, the reM we just created covers only a tiny portion of real data. From now on, we will use a premade reM generated from the same dataset in full.
reM = import_record("./files/basic_synchro/reM_basic_synchro.h5")
record_time = parse_time("200331_170849") # Starting time of this example record, found in the filename of the record
print(record_time)
match_starting_position seeks the first frame of a stimulus in the record. We can use functions from theonerig.synchro.extracting to find out which stimuli were used in that record and to get their values.
from theonerig.synchro.extracting import get_QDSpy_logs, unpack_stim_npy
log = get_QDSpy_logs("./files/basic_synchro")[0]
print(log.stimuli[2])
unpacked_checkerboard = unpack_stim_npy("./files/basic_synchro/stimulus_data", "eed21bda540934a428e93897908d049e")
print(unpacked_checkerboard[0].shape, unpacked_checkerboard[1].shape, unpacked_checkerboard[2])
get_position_estimate tells us approximately where the stimulus should be, which reduces the search time.
estimate_start = get_position_estimate(log.stimuli[2].start_time, record_time, sampling_rate=30000)
print("Estimate position in sample points", estimate_start)
stim_start_frame = match_starting_position(reM["main_tp"][0], reM["signals"][0], stim_signals=unpacked_checkerboard[1], estimate_start=estimate_start)
print(stim_start_frame)
Let's see the match we obtain
display_match(stim_start_frame, reference=unpacked_checkerboard[1], recorded=reM["signals"][0])
We have a match! But be sure to check it every time, as mismatches occur. If the match is wrong, set stim_start_frame manually.
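For instance (the value below is purely a placeholder; read the real one off the display_match plot):
# stim_start_frame = 1234  # hypothetical value, set manually when the automatic match is wrong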
We correct the stimulus values with frame_error_correction, which also returns the changes it made so that we can keep track of the errors.
signals = reM["signals"][0][stim_start_frame:stim_start_frame+len(unpacked_checkerboard[0])]
corrected_checkerboard, shift_log, error_frames = frame_error_correction(signals, unpacked_checkerboard, algo="nw")
print(shift_log, len(error_frames))