taglameter.py
import pyaudio
import numpy as np
import time
import os
#import matplotlib.pyplot as plt

def main(subID = ''):
    port = pyaudio.PyAudio()  # Opens a pyAudio streaming port
    print "\n ---------------------------------------------------"
    print " | Welcome to the incredible TaboGladdoAudioMeter! |"
    print " ---------------------------------------------------\n\n"
    # The subject ID can be entered as an argument of this function. If not,
    # the ID is requested via keyboard input.
    if subID == '':
        subID = raw_input('Enter participant ID: ')
    if subID.lower() in ["falken", "stephen falken"]:
        print "Greetings, Professor Falken."
    # Calibration is triggered through the subject ID; we need to change this.
    if subID.lower() == 'calibrateplease':
        calibrate(port)
        return
    print "Loading...."
    time.sleep(1)
    print "\n"
    chan = ['left', 'right']
    # Thresholding is done sequentially for each frequency in par['freq'],
    # once per channel.
    for ichan in chan:
        par = loadParameters(ichan)  # Loads the audiometer parameters
        threshold = np.zeros(len(par['freq']))
        for fIx, freq in enumerate(par['freq']):
            print 'Testing {} f = {:.0f}Hz'.format(ichan, freq)
            threshold[fIx] = measureThreshold(fIx, port, par, ichan)
            print '---- done! Threshold {} at {:.1f} dB \n'.format(ichan, threshold[fIx])
        saveThresholds(threshold, subID, par, ichan)
    port.terminate()
    # * Add plots of the thresholds here? It would be better to print the
    #   plots using plain text so that the audiometer can run completely
    #   without a window manager. Also: it would be fun to implement.
    #   (A hedged sketch follows after this function.) <-- ToDo.
    raw_input('Press ENTER to exit...')
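
# The ToDo above asks for plain-text plots of the thresholds so the audiometer
# can run without a window manager. The function below is only a hedged,
# illustrative sketch of one way to do that: it is not called anywhere in this
# script, and its name (printThresholdPlot) and the dbPerChar parameter are
# assumptions rather than part of the original audiometer.
def printThresholdPlot(threshold, par, chan, dbPerChar = 2.5):
    """
    Hedged sketch: prints a crude horizontal bar chart of the thresholds,
    one row per frequency, using one '#' per dbPerChar decibels.
    """
    print "Text plot of thresholds ({} ear):".format(chan)
    for f, t in zip(par['freq'], threshold):
        nChars = int(max(t, 0) / dbPerChar)
        print "{:>7.0f} Hz | {} {:.1f} dB".format(f, "#" * nChars, t)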

def saveThresholds(threshold, subID, par, chan):
    """
    Saves the hearing thresholds in a plain text file.
    Inputs
        threshold : array with the measured thresholds (dB). Each value
                    corresponds to a frequency value in par['freq']
        subID     : subject ID
        par       : dictionary with the parameters, check loadParameters()
        chan      : 'left' or 'right' channel
    """
    header = "Audiometry for subject {}".format(subID)
    footer = "\n// Powered by the TaboGladdoAudioMeter!"
    headLine = " |".join(["{:>7.0f}".format(f) for f in par['freq']])
    resLine = " |".join(["{:>7.1f}".format(t) for t in threshold])
    if chan == 'left':
        headLine = "Freq L (Hz) |" + headLine
    else:
        headLine = "Freq R (Hz) |" + headLine
    resLine = "Thresh (dB) |" + resLine
    header = header + "\n" + "-" * len(headLine) + "\n"
    footer = "\n" + "-" * len(headLine) + "\n" + footer
    if not os.path.isdir('./out/'):
        os.makedirs('./out/')
    # The channel is part of the file name so that the right-ear results do
    # not overwrite the left-ear results.
    resFile = "./out/audioThresh-{}-{}".format(subID, chan)
    f = open(resFile, "w")
    f.write(header + headLine + "\n" + resLine + footer)
    f.close()
    print "Audiometry completed! Results saved to {}\n".format(resFile)
    print header + headLine + "\n" + resLine + footer
    print "\n\n"

# * It would be cool to implement a function that automatically reads the
#   text file and stores the thresholds in an array or list. (A hedged
#   sketch follows below.) <-- ToDo.
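
# The ToDo above asks for a routine that reads the thresholds back from the
# text file written by saveThresholds(). The function below is a hedged,
# illustrative sketch of such a reader: it is not called anywhere in this
# script, and its name (readThresholds) is an assumption rather than part of
# the original audiometer. It simply parses the "Freq" and "Thresh" rows of
# the saved table.
def readThresholds(resFile):
    """
    Hedged sketch: returns (freq, threshold) arrays parsed from a results
    file produced by saveThresholds().
    """
    freq, threshold = [], []
    with open(resFile, "r") as f:
        for line in f:
            if line.startswith("Freq"):
                freq = [float(v) for v in line.split("|")[1:]]
            elif line.startswith("Thresh"):
                threshold = [float(v) for v in line.split("|")[1:]]
    return np.array(freq), np.array(threshold)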

def measureThreshold(fIx, port, par, chan):
    """
    Runs the algorithm to measure the hearing threshold of a given frequency.
    It uses the "up-5 down-10" technique. The process is repeated until the
    participant responds to two out of three repetitions.
    Inputs
        fIx  : integer such that par['freq'][fIx] = tested frequency (Hz)
        port : pyAudio port to stream the sound
        par  : dictionary with the parameters, check loadParameters()
        chan : 'left' or 'right' channel
    Outputs
        threshold : measured hearing threshold (dB)
    """
    from collections import Counter
    freq = par['freq'][fIx]  # fIx : current frequency = par['freq'][fIx]
    lIx = par['l0Ix']        # lIx : current loudness = par['loud'][lIx]
    prev = True      # Flags if the sound was heard in the previous iteration
    last_spl = []    # Remembers the loudness indices played so far
    max_played = 0
    # The left and right channels are tested separately: main() calls this
    # function once per channel with the matching calibration parameters.
    while True:
        # Wait for a normally distributed amount of time
        time.sleep(par['avgT'] + par['sgmT'] * abs(np.random.randn()))
        print " -> loudness = {:>5.1f}dB".format(par['loud'][lIx])
        # Play the tone and get the key press (None if no key was pressed)
        key = playTone(freq, par['A0'][fIx][lIx], port, par, chan)
        print ""
        # Count how many times each loudness index has been played
        num_played = Counter(last_spl)
        if num_played:  # Check if not empty
            max_played = num_played[max(num_played, key=num_played.get)]
        if key is not None:
            if prev:
                if lIx != 0 and max_played < 3:  # not at the minimum level yet
                    lIx = max(0, lIx - 2)        # two down, saturating at 0
                    prev = True
                else:   # loudness is at the minimum: return the minimum
                    return par['loud'][lIx]
            else:
                # Heard again after a missed tone: this is the threshold
                return par['loud'][lIx]
        elif key is None:
            if max_played < 4:
                lIx += 1  # one up
            else:
                return par['loud'][lIx]
            prev = False
        last_spl.append(lIx)

def playTone(f, a0, port, par, chan):
    """
    Plays a pure tone and waits for a key press during a limited amount of
    time. Sampling rate, duration, and waiting time are parameters stored in
    par.
    Inputs
        f    : pure tone frequency (Hz)
        a0   : waveform amplitude (0 < a0 < 1)
        port : pyAudio port to stream the sound
        par  : dictionary with the parameters, check loadParameters()
        chan : 'left' or 'right' channel
    Outputs
        key : key pressed, None if no key is pressed.
    """
    streamer = PyAudioStreamer(f, a0, par, chan)
    stream = port.open(format = pyaudio.paFloat32,
                       channels = 2,
                       rate = par['fs'],
                       output = True,
                       stream_callback = streamer.callback)
    stream.start_stream()
    key = listenKeyPress(par['dur'] + 2 * par['tau'] + par['wait'])
    stream.stop_stream()
    stream.close()
    return(key)

def listenKeyPress(waitTime, terminateOnPress = False, verbose = True):
    """
    Listens for keyboard key presses during a specified amount of time.
    * Mouse inputs should be integrated here (nightmare alert!) <-- ToDo
    Inputs
        waitTime         : maximum waiting time (seconds)
        terminateOnPress : function exits on key press if set to True
        verbose          : verbose flag (True/False)
    Outputs
        key : first pressed key, None if no key is pressed
    """
    import termios, fcntl, sys, os
    # Switch the terminal to non-canonical, non-echoing, non-blocking mode
    fd = sys.stdin.fileno()
    oldterm = termios.tcgetattr(fd)
    newattr = termios.tcgetattr(fd)
    newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
    termios.tcsetattr(fd, termios.TCSANOW, newattr)
    oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
    noKeyPressed = True
    key = None
    startTime = time.time()
    try:
        while (time.time() - startTime) < waitTime and noKeyPressed:
            try:
                key = sys.stdin.read()
                noKeyPressed = False
                if verbose:
                    print ' - response! (key pressed = {})'.format(key),
                break
            except IOError:
                pass
    finally:
        # Restore the original terminal settings
        termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
    if not(terminateOnPress) and (time.time() - startTime) < waitTime:
        time.sleep(waitTime - time.time() + startTime)
    return(key)

def loadParameters(chan):
    """
    Returns the audiometer parameters. All frequencies and the initial
    loudness must belong to the values stored in the calibration file.
    """
    # Audiometer parameters -- Feel free to mess around! :)
    par = {'dur'  : 1.1,    # tone duration including ramps (seconds)
           'fs'   : 44100,  # sample rate (Hz)
           'tau'  : 0.05,   # duration of the onset/offset ramps (seconds)
           'avgT' : 1,      # average pause time between tones (seconds)
           'sgmT' : 1,      # std of pause time between tones (seconds)
           'wait' : 1,      # response waiting time after offset (seconds)
           'l0'   : 30,     # initial loudness for thresholding (dB SPL)
           'freq' : [.25, .5, 1, 2, 4, 8, 12.5, 14],  # kHz
           'calf' : ''      # placeholder path to calibration file
           }
    # Choose the calibration file for the left or right channel.
    if chan == 'left':
        par['calf'] = './calibration_left.npz'
    else:
        par['calf'] = './calibration_right.npz'
    # Transforms --- touching this can be punished with death (by spoon!)
    par['freq'] = np.array(par['freq']) * 1000
    calFile = np.load(par['calf'])
    par['loud'] = calFile['LOUD']
    par['l0Ix'] = [ix for ix, loud in enumerate(par['loud'])
                   if loud == par['l0']][0]
    # Calibrated amplitudes, in the same order as par['freq']
    par['A0'] = [calFile['A0'][ix]
                 for f in par['freq']
                 for ix, F in enumerate(calFile['FREQ']) if F == f]
    if par['fs'] != calFile['FS']:
        print "WARNING! Sample rate set to {}Hz ".format(par['fs']),
        print "but calibration was performed at {}Hz".format(calFile['FS'])
    return par
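
# The calibrate() routine invoked from main() is not part of this file, but
# loadParameters() expects each calibration .npz file to contain the arrays
# FREQ (calibrated frequencies, Hz), LOUD (loudness levels, dB SPL), A0
# (waveform amplitudes, indexed as A0[frequencyIx][loudnessIx]), and FS
# (sample rate, Hz). The helper below is only a hedged sketch of that file
# layout: it is not called anywhere, its name is an assumption, and the
# values it writes are placeholders rather than real calibration data.
def writeExampleCalibrationFile(path = './calibration_example.npz'):
    """
    Hedged sketch: writes a dummy calibration file with the structure that
    loadParameters() expects. Do NOT use it for actual measurements.
    """
    FREQ = np.array([250., 500., 1000., 2000., 4000., 8000., 12500., 14000.])
    LOUD = np.arange(0, 85, 5)                  # 0, 5, ..., 80 dB SPL
    A0 = np.full((len(FREQ), len(LOUD)), 0.01)  # placeholder amplitudes
    FS = 44100
    np.savez(path, FREQ=FREQ, LOUD=LOUD, A0=A0, FS=FS)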

class PyAudioStreamer:
    """
    Support class wrapping the callback function for a pyAudio stream.
    Inputs
        f    : pure tone frequency (Hz)
        a0   : waveform amplitude (0 < a0 < 1)
        par  : dictionary with the parameters, check loadParameters()
        chan : 'left' or 'right' channel
    """
    def __init__(self, f, a0, par, chan):
        self.createTone(f, a0, par, chan)
        self.lastFrame = 0

    def callback(self, in_data, frame_count, time_info, status):
        """
        Callback method for the pyAudio stream. Returns a chunk of the
        stereo waveform generated by createTone().
        Inputs
            in_data     : arg required by pyAudio but not in use
            frame_count : chunk size in frames (integer)
            time_info   : arg required by pyAudio but not in use
            status      : arg required by pyAudio but not in use
        Outputs
            chunk : excerpt of the sound waveform (as bytes)
            flag  : paComplete once the sound waveform is depleted,
                    paContinue otherwise
        """
        # The waveform holds two interleaved channels, i.e. two samples
        # per requested frame.
        prevFrame = self.lastFrame
        self.lastFrame = prevFrame + 2 * frame_count
        if self.lastFrame >= len(self.sound):
            finished = True
            self.lastFrame = len(self.sound)
        else:
            finished = False
        chunk = self.sound[prevFrame:self.lastFrame]
        flag = pyaudio.paComplete if finished else pyaudio.paContinue
        return (chunk.tostring(), flag)

    def createTone(self, f, a0, par, chan):
        """
        Creates a pure tone with Hamming onset/offset ramps and stores it as
        an interleaved stereo float32 waveform in self.sound. The tone is
        placed in the requested channel; the other channel stays silent.
        * It would be better to generate the tone online. <-- ToDo.
        Inputs
            f    : pure tone frequency (Hz)
            a0   : waveform amplitude (0 < a0 < 1)
            par  : dictionary specifying the duration, sampling rate, and
                   Hamming window ramp/damp size; check loadParameters()
            chan : 'left' or 'right' channel
        Outputs
            sound : sound waveform (stored in self.sound)
        """
        x = np.linspace(0, par['dur'], int(par['dur'] * par['fs']))
        omega = 2 * np.pi * f
        tone = a0 * np.sin(omega * x)
        # Hamming ramp/damp to avoid clicks at onset and offset, plus a short
        # silent padding of the same length at both ends
        if par['tau'] > 0:
            hL = int(par['tau'] * par['fs'])
            hw = np.hamming(2 * hL)
            tone[:hL]  = tone[:hL]  * hw[:hL]
            tone[-hL:] = tone[-hL:] * hw[hL:]
            tone = np.concatenate((np.zeros(hL), tone, np.zeros(hL)))
        silence = np.zeros(len(tone))
        # Interleave the two channels: [L0, R0, L1, R1, ...]
        if chan == 'left':
            stereo = np.column_stack((tone, silence))
        else:
            stereo = np.column_stack((silence, tone))
        self.sound = stereo.ravel().astype(np.float32)

if __name__ == "__main__":
    main()
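
# Typical usage (assumed): run the script from a terminal with the
# calibration .npz files in the working directory, e.g.
#   python taglameter.py
# then enter the participant ID when prompted, or 'calibrateplease' to run
# the calibration routine instead of the audiometry.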