Skip to content

Commit

Permalink
logger level integration
Browse files Browse the repository at this point in the history
Added logger levels (info, warning, none) instead of a boolean print option
  • Loading branch information
LMBooth committed May 28, 2023
1 parent 97441f6 commit a418fbb
Show file tree
Hide file tree
Showing 10 changed files with 110 additions and 110 deletions.
2 changes: 1 addition & 1 deletion pybci/Examples/testSimple.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
while(True):
currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers are sent too close together they will be ignored till done processing
time.sleep(1) # wait for marker updates
print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy))#, end="\r")
print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy), end="\r")
if len(currentMarkers) > 1: # check there is more than one marker type received
if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired:
classInfo = bci.CurrentClassifierInfo() # hangs if called too early
Expand Down
10 changes: 1 addition & 9 deletions pybci/ThreadClasses/AsyncDataReceiverThread.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,6 @@ def run(self):
if self.startCounting: # we received a marker
posCount += 1
if posCount >= self.desiredCount: # enough samples are in FIFO, chop up and put in dataqueue
start = time.time()

if len(self.customEpochSettings.keys())>0: # custom marker received
if self.customEpochSettings[self.currentMarker].splitCheck: # slice epochs into overlapping time windows
window_length = self.customEpochSettings[self.currentMarker].windowLength
Expand Down Expand Up @@ -93,8 +91,6 @@ def run(self):
sliceDataFIFOs = [slice_fifo_by_time(fifo, start_time, end_time) for fifo in dataFIFOs]
self.dataQueueTrain.put([sliceDataFIFOs, self.currentMarker, self.sr, self.devCount])
# reset flags and counters
end = time.time()
print(f"Data slicing process time {end - start}")
posCount = 0
self.startCounting = False
else: # in Test mode
Expand All @@ -115,12 +111,9 @@ def run(self):
window_end_time = timestamp + window_length
else:
pass
# add levels of debug
# print("PyBCI: LSL pull_sample timed out, no data on stream...")

# add levels of debug?

def ReceiveMarker(self, marker, timestamp): # timestamp will be used for non sample rate specific devices (pupil-labs gazedata)
#print(marker)
if self.startCounting == False: # only one marker at a time allow, other in windowed timeframe ignored
self.currentMarker = marker
self.markerTimestamp = timestamp
Expand All @@ -131,4 +124,3 @@ def ReceiveMarker(self, marker, timestamp): # timestamp will be used for non sam
else: # no custom markers set, use global settings
self.desiredCount = int(self.globalEpochSettings.tmax * self.sr) # find number of samples after tmax to finish counting
self.startCounting = True
#print(self.desiredCount)
24 changes: 11 additions & 13 deletions pybci/ThreadClasses/ClassifierThread.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from ..Utils.Classifier import Classifier
from ..Utils.Logger import Logger
import queue,threading, time

class ClassifierThread(threading.Thread):
Expand All @@ -8,7 +9,7 @@ class ClassifierThread(threading.Thread):
guess = None
epochCounts = {}
def __init__(self, closeEvent,trainTestEvent, featureQueueTest,featureQueueTrain, classifierInfoQueue, classifierInfoRetrieveEvent,
classifierGuessMarkerQueue, classifierGuessMarkerEvent, printDebug = True, numStreamDevices = 1,
classifierGuessMarkerQueue, classifierGuessMarkerEvent, logger = Logger(Logger.INFO), numStreamDevices = 1,
minRequiredEpochs = 10, clf = None, model = None, torchModel = None):
super().__init__()
self.trainTestEvent = trainTestEvent # responsible for tolling between train and test mode
Expand All @@ -22,7 +23,7 @@ def __init__(self, closeEvent,trainTestEvent, featureQueueTest,featureQueueTrain
self.classifierGuessMarkerQueue = classifierGuessMarkerQueue
self.classifierGuessMarkerEvent = classifierGuessMarkerEvent
self.numStreamDevices = numStreamDevices
self.printDebug = printDebug
self.logger = logger

def run(self):
if self.numStreamDevices > 1:
Expand All @@ -48,31 +49,29 @@ def run(self):
# need to check if all device data is captured, then flatten and append
if len(self.epochCounts) > 1: # check if there is more then one test condition
minNumKeyEpochs = min([self.epochCounts[key][1] for key in self.epochCounts]) # check minimum viable number of training epochs have been obtained
#print("minNumKeyEpochs"+str(minNumKeyEpochs))
if minNumKeyEpochs < self.minRequiredEpochs:
pass
else:
start = time.time()
self.classifier.TrainModel(self.features, self.targets)
if (self.printDebug):
if (self.logger.level == Logger.INFO):
end = time.time()
print(f"PyBCI: Info - classifier training time {end - start}")
self.logger.log(Logger.INFO, f" classifier training time {end - start}")
if self.classifierGuessMarkerEvent.is_set():
self.classifierGuessMarkerQueue.put(None)
else:
self.targets.append(target)
self.features.append(featuresSingle)
if len(self.epochCounts) > 1: # check if there is more then one test condition
minNumKeyEpochs = min([self.epochCounts[key][1] for key in self.epochCounts]) # check minimum viable number of training epochs have been obtained
#print("minNumKeyEpochs"+str(minNumKeyEpochs))
if minNumKeyEpochs < self.minRequiredEpochs:
pass
else:
start = time.time()
self.classifier.TrainModel(self.features, self.targets)
if (self.printDebug):
if (self.logger.level == Logger.INFO):
end = time.time()
print(f"PyBCI: Info - classifier training time {end - start}")
self.logger.log(Logger.INFO, f" classifier training time {end - start}")
if self.classifierGuessMarkerEvent.is_set():
self.classifierGuessMarkerQueue.put(None)
except queue.Empty:
Expand All @@ -87,18 +86,17 @@ def run(self):
for value in tempdatatest.values():
flattened_list.extend(value)
tempdatatest = {}
#self.features.append(flattened_list)
start = time.time()
self.guess = self.classifier.TestModel(flattened_list)
if (self.printDebug):
if (self.logger.level == Logger.INFO):
end = time.time()
print(f"PyBCI: Info - classifier testing time {end - start}")
self.logger.log(Logger.INFO, f" classifier testing time {end - start}")
else:
start = time.time()
self.guess = self.classifier.TestModel(featuresSingle)
if (self.printDebug):
if (self.logger.level == Logger.INFO):
end = time.time()
print(f"PyBCI: Info - classifier testing time {end - start}")
self.logger.log(Logger.INFO, f" classifier testing time {end - start}")
if self.classifierGuessMarkerEvent.is_set():
self.classifierGuessMarkerQueue.put(self.guess)
except queue.Empty:
Expand Down
7 changes: 0 additions & 7 deletions pybci/ThreadClasses/DataReceiverThread.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import threading
import time
from collections import deque
import itertools

Expand All @@ -21,7 +20,6 @@ def __init__(self, closeEvent, trainTestEvent, dataQueueTrain,dataQueueTest, dat
self.globalEpochSettings = globalEpochSettings
self.streamChsDropDict = streamChsDropDict
self.sr = dataStreamInlet.info().nominal_srate()
#self.dataType = dataStreamInlet.info().type()
self.devCount = devCount # used for tracking which device is sending data to feature extractor

def run(self):
Expand All @@ -32,7 +30,6 @@ def run(self):
if max([self.customEpochSettings[x].tmin + self.customEpochSettings[x].tmax for x in self.customEpochSettings]) > maxTime:
maxTime = max([self.customEpochSettings[x].tmin + self.customEpochSettings[x].tmax for x in self.customEpochSettings])
fifoLength = int(self.dataStreamInlet.info().nominal_srate()*maxTime)
#print(fifoLength)
dataFIFOs = [deque(maxlen=fifoLength) for ch in range(chCount - len(self.streamChsDropDict))]
while not self.closeEvent.is_set():
sample, timestamp = self.dataStreamInlet.pull_sample(timeout = 1)
Expand All @@ -45,7 +42,6 @@ def run(self):
if self.startCounting: # we received a marker
posCount+=1
if posCount >= self.desiredCount: # enough samples are in FIFO, chop up and put in dataqueue
#start = time.time()
if len(self.customEpochSettings.keys())>0: # custom marker received
if self.customEpochSettings[self.currentMarker].splitCheck: # slice epochs in to overlapping time windows
window_samples =int(self.customEpochSettings[self.currentMarker].windowLength * self.sr) #number of samples in each window
Expand Down Expand Up @@ -90,10 +86,8 @@ def run(self):
else:
pass
# add levels of debug
# print("PyBCI: LSL pull_sample timed out, no data on stream...")

def ReceiveMarker(self, marker, timestamp): # timestamp will be used for non sample rate specific devices (pupil-labs gazedata)
#print(marker)
if self.startCounting == False: # only one marker at a time allow, other in windowed timeframe ignored
self.currentMarker = marker
if len(self.customEpochSettings.keys())>0: # custom marker received
Expand All @@ -103,4 +97,3 @@ def ReceiveMarker(self, marker, timestamp): # timestamp will be used for non sam
else: # no custom markers set, use global settings
self.desiredCount = int(self.globalEpochSettings.tmax * self.sr) # find number of samples after tmax to finish counting
self.startCounting = True
#print(self.desiredCount)
4 changes: 0 additions & 4 deletions pybci/ThreadClasses/FeatureProcessorThread.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
import threading, queue
from ..Utils.FeatureExtractor import GenericFeatureExtractor
from ..Configuration.EpochSettings import GlobalEpochSettings
import time

class FeatureProcessorThread(threading.Thread):

tempDeviceEpochLogger = []
def __init__(self, closeEvent, trainTestEvent, dataQueueTrain,dataQueueTest,
featureQueueTest,featureQueueTrain, totalDevices,markerCountRetrieveEvent,markerCountQueue, customEpochSettings = {},
Expand All @@ -25,7 +22,6 @@ def __init__(self, closeEvent, trainTestEvent, dataQueueTrain,dataQueueTest,
self.customEpochSettings = customEpochSettings
self.globalWindowSettings = globalEpochSettings
self.tempDeviceEpochLogger = [0 for x in range(self.totalDevices)]

def run(self):
while not self.closeEvent.is_set():
if self.markerCountRetrieveEvent.is_set():
Expand Down
2 changes: 0 additions & 2 deletions pybci/ThreadClasses/MarkerThread.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
class MarkerThread(threading.Thread):
"""Receives Marker on chosen lsl Marker outlet. Pushes marker to data threads for framing epochs,
also sends markers to feature processing thread for epoch counting and multiple device synchronisation.
"""
def __init__(self,closeEvent, trainTestEvent, markerStreamInlet, dataThreads, featureThreads):#, lock):
super().__init__()
Expand All @@ -23,7 +22,6 @@ def run(self):
thread.ReceiveMarker(marker, timestamp)
for thread in self.featureThreads:
thread.ReceiveMarker(marker, timestamp)
#self.featureThread.ReceiveMarker(marker, timestamp)
else:
pass
# add levels of debug
Expand Down
6 changes: 3 additions & 3 deletions pybci/Utils/FeatureExtractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ def __init__(self, freqbands = [[1.0, 4.0], [4.0, 8.0], [8.0, 12.0], [12.0, 20.0
super().__init__()
self.freqbands = freqbands
self.featureChoices = featureChoices
for key, value in self.featureChoices.__dict__.items():
print(f"{key} = {value}")
#for key, value in self.featureChoices.__dict__.items():
# print(f"{key} = {value}")
selFeats = sum([self.featureChoices.appr_entropy,
self.featureChoices.perm_entropy,
self.featureChoices.spec_entropy,
Expand Down Expand Up @@ -46,7 +46,7 @@ def ProcessFeatures(self, epoch, sr, target):
sr = samplerate of current device
Returns:
features = 2D numpy array of size (chs, (len(freqbands) + sum(True in self.featureChoices)))
target = same as input target
target = same as input target, can be useful for using a baseline number differently
NOTE: Any channels with a constant value will generate warnings in any frequency based features (constant level == no frequency components).
"""
#print(np.array(epoch).shape)
Expand Down
Loading

0 comments on commit a418fbb

Please sign in to comment.