Issue
I am trying to play an audio signal with simpleaudio inside a GUI application where the user should react to the content of the chunk being played and push a button. At the moment the user pushes the button I would like to change to the next track. This is all done using Qt's signals and slots in Python 3.x with PyQt5. Even though my GUI does not freeze, I do not understand why I cannot read the button action during (between) the chunks of audio that are being played; instead, all actions are read after all tracks finish.
My code looks like this. First, the module that handles the tracks and chunks:
import simpleaudio as sa
import numpy as np


class MusicReactor:
    def __init__(self):
        self.timeProTone = ...
        self.deltaT = ...
        self.maxVolSkal = ...
        self.minVolSkal = ...
        self.frequencySample = ...
        self.currentTestedEar = ...

    def test_function(self, frequency):
        # array of time values
        times = np.arange(0, self.timeProTone, self.deltaT)
        # generator that yields a chunk with a new volume on each iteration
        for time in times:
            # compute the volume for this step of the ramp and apply it
            currentVolume = (self.maxVolSkal - self.minVolSkal) / self.timeProTone * time + self.minVolSkal
            self.setVolumeScalar(currentVolume)
            # create the chunk of the tone as a numpy array
            audio = createTone(frequency, self.deltaT, self.frequencySample, self.currentTestedEar)
            yield audio, currentVolume


def createTone(frequency, duration, frequencySampled, currentTestedEar=TestEar.Both):
    # Generate an array with duration*sample_rate steps, ranging between 0 and duration seconds
    tt = np.linspace((0, 0), (duration, duration), int(duration * frequencySampled), False)
    # populate the other ear with zeros
    if currentTestedEar is not TestEar.Both:
        tt[:, 1 - currentTestedEar.value] = 0  # this only works because the note is sinusoidal: sin(0) = 0
    # Generate a sine wave at the requested frequency
    note = np.sin(frequency * tt * 2 * np.pi)
    # normalize to the 16-bit range
    note *= 32767 / np.max(np.abs(note))
    # Ensure that the highest value is in the 16-bit range
    audio = note * (2 ** 15 - 1) / np.max(np.abs(note))
    # Convert to 16-bit data
    audio = audio.astype(np.int16)
    return audio


def playTone(audio, frequencySample, num_channels=1, bytes_per_sample=2):
    # Start playback
    play_obj = sa.play_buffer(audio, num_channels, bytes_per_sample, frequencySample)
    # Wait for playback to finish before exiting
    play_obj.wait_done()


def generateRndFreq(minF, maxF):
    freq = np.random.uniform(low=minF, high=maxF)
    return freq
Now the GUI class and its corresponding worker class
import logging

import numpy as np
from PyQt5 import uic
from PyQt5.QtCore import QObject, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget

import Hearing  # the module above (MusicReactor, createTone, playTone, generateRndFreq)


class HearingTest_ui(QWidget):
    # Send info through signals to subthreads
    sig_int_sender = pyqtSignal(int)
    hearingObjSender = pyqtSignal(Hearing.HearingTest)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        uic.loadUi("testForm.ui", self)
        self.Both_rB.toggled.connect(self.onTogle_earTested)
        self.Links_rB.toggled.connect(self.onTogle_earTested)
        self.Recht_rB.toggled.connect(self.onTogle_earTested)
        # Method 2 Test
        self.ML_startButton.clicked.connect(self.runMethod2Test)
        self.setMaxMLProgressBar()
        self.ml_nTests = self.ML_spinBox.value()
        self.ML_spinBox.valueChanged.connect(self.setNTests)
        self.ML_spinBox.valueChanged.connect(self.setMaxMLProgressBar)
        # Hearing Test Object
        self.HT = Hearing.MusicReactor()

    def runMethod2Test(self):
        # Preprocessing
        self.HT.choose_ear(self.testedEarTuple)  # reads a toggle to assign a channel for the chunk of music
        # Thread and worker configuration
        # Step 2: Create a QThread object
        self.ml_thread = QThread(parent=self)
        # Step 3: Create a worker object
        self.ml_worker = ML_Worker(self.ml_nTests)
        # Step 4: Move the worker to the thread
        self.ml_worker.moveToThread(self.ml_thread)
        # Step 5: Connect signals and slots
        # self.ml_thread.started.connect(partial(self.ml_worker.actualLongTaskFromHearingTest, self.HT))
        self.hearingObjSender.connect(self.ml_worker.actualLongTaskFromHearingTest)
        self.ml_worker.progress.connect(self.updateProgressbar)
        self.ML_spinBox.valueChanged.connect(self.ml_worker.set_maxTests)
        self.sig_int_sender.connect(self.ml_worker.set_maxTests)
        self.ML_yesButton.clicked.connect(self.ml_worker.change_Flag)
        self.ml_worker.request_playchunk.connect(self.ml_worker.sendAudio2queue)
        self.ml_worker.finished.connect(self.ml_thread.quit)
        self.ml_worker.finished.connect(self.ml_worker.deleteLater)
        self.ml_thread.finished.connect(self.ml_thread.deleteLater)
        # Final resets
        self.ml_worker.changeButtonStatus.connect(self.ML_startButton.setEnabled)
        # Start the thread
        print("clicked runMethodOfLimits")
        self.ml_thread.start()
        self.hearingObjSender.emit(self.HT)


class ML_Worker(QObject):
    finished = pyqtSignal()
    progress = pyqtSignal(int)
    retrieve = pyqtSignal()
    changeButtonStatus = pyqtSignal(bool)
    request_playchunk = pyqtSignal(np.ndarray, int, Hearing.MusicReactor)

    def __init__(self, nTest):
        super().__init__()
        self.__abort = False
        self.nTests = nTest
        self.MoLFlag = False

    def abort(self):
        self.__abort = True

    @pyqtSlot(int)
    def set_maxTests(self, val):
        print(type(val))
        logging.info(f"set_maxTests.... {val}")
        self.nTests = val

    @pyqtSlot()
    def change_Flag(self):
        print("clicked")
        self.MoLFlag = True

    # definition of the long-running task
    @pyqtSlot(Hearing.MusicReactor)
    def actualLongTaskFromHearingTest(self, HTObj):
        self.changeButtonStatus.emit(False)
        self.progress.emit(0)
        self.retrieve.emit()
        print(self.nTests)
        start = 0
        for i in range(self.nTests):
            self.MoLFlag = False
            j = i + 1
            print("start", i)
            # create the frequency for the test
            chunk_freq = Hearing.generateRndFreq(0, 10000)
            # create chunks as a generator
            for chunk, volume in HTObj.test_function(chunk_freq):
                # play this chunk of the audio
                self.request_playchunk.emit(chunk, 2, HTObj)  # this is my current method, using signals and slots
                # Hearing.playTone(chunk, HTObj.frequencySample, num_channels=2)  # previously I tried this directly, which resulted in the same behavior
                print(volume)
                if self.MoLFlag:
                    print(self.MoLFlag)
                    break
            self.progress.emit(j)
        self.changeButtonStatus.emit(True)
        self.finished.emit()

    @pyqtSlot(np.ndarray, int, Hearing.MusicReactor)
    def sendAudio2queue(self, chunk, channels, HTObj):
        Hearing.playTone(chunk, HTObj.frequencySample, num_channels=channels)
If somebody could take a look I would be very grateful. I would really like to understand why this is happening. I believe it has something to do with the thread queue; probably I would need to open a new thread that is in charge of the music while the other one takes care of the GUI reactions, but I still do not understand why it does not break the loop (with the generator) when I click the "ML_yesButton".
Solution
It is not necessary to use threads in this case. The wait_done() method blocks the thread it is called in until playback of the buffer has finished. In your worker, the ML_yesButton click is delivered to change_Flag through a queued connection into the worker thread, and that thread cannot process queued slot calls while actualLongTaskFromHearingTest is still running its loop. The clicks are therefore only handled after the loop (and all the tracks) have finished, which is why MoLFlag never breaks the loop. Instead of blocking and polling a flag, a QTimer can be used to check periodically whether the audio has finished playing.
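A stripped-down sketch with no audio makes this visible (the names here are hypothetical and time.sleep stands in for wait_done()): clicking the button while the loop runs prints nothing until the loop has finished, because the queued change_flag call sits behind the still-running long_task.

import sys
import time

from PyQt5.QtCore import QObject, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget


class Worker(QObject):
    finished = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.flag = False

    @pyqtSlot()
    def change_flag(self):
        # Queued to the worker thread: cannot run while long_task() is still executing.
        print("change_flag executed")
        self.flag = True

    @pyqtSlot()
    def long_task(self):
        for i in range(10):
            time.sleep(0.5)  # stands in for wait_done() blocking on a chunk
            print("chunk", i, "flag =", self.flag)
            if self.flag:
                break  # never reached while this slot monopolises the worker thread
        self.finished.emit()


class Window(QWidget):
    start_requested = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.thread = QThread(parent=self)
        self.worker = Worker()
        self.worker.moveToThread(self.thread)

        self.start_requested.connect(self.worker.long_task)
        button = QPushButton("Stop")
        # Clicks are queued to the worker thread, just like ML_yesButton -> change_Flag.
        button.clicked.connect(self.worker.change_flag)
        self.worker.finished.connect(self.thread.quit)

        lay = QVBoxLayout(self)
        lay.addWidget(button)

        self.thread.start()
        self.start_requested.emit()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = Window()
    w.show()
    sys.exit(app.exec_())

The timer-based approach below avoids the problem by never blocking any thread: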
import simpleaudio as sa
import numpy as np

from PyQt5.QtCore import pyqtSignal, QObject, Qt, QTimer
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget


class AudioManager(QObject):
    started = pyqtSignal()
    finished = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._play_obj = None
        self._timer = QTimer(interval=10)
        self._timer.timeout.connect(self._handle_timeout)

    def start(self, audio_data, num_channels, bytes_per_sample, sample_rate):
        self._play_obj = sa.play_buffer(
            audio_data, num_channels, bytes_per_sample, sample_rate
        )
        self._timer.start()
        self.started.emit()

    def stop(self):
        if self._play_obj is None:
            return
        self._play_obj.stop()
        self._play_obj = None
        self.finished.emit()

    def _handle_timeout(self):
        if self._play_obj is None:
            return
        if not self.running():
            self.stop()

    def running(self):
        return self._play_obj.is_playing()


def create_tone(duration, fs, f):
    tt = np.linspace((0, 0), (duration, duration), int(duration * fs), False)
    note = np.sin(f * tt * 2 * np.pi)
    note *= 32767 / np.max(np.abs(note))
    audio = note * (2 ** 15 - 1) / np.max(np.abs(note))
    audio = audio.astype(np.int16)
    return audio


class Widget(QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.audio_manager = AudioManager()
        self.audio_manager.started.connect(self.handle_started)
        self.audio_manager.finished.connect(self.handle_finished)

        self.button = QPushButton("Start", checkable=True)
        self.button.toggled.connect(self.handle_toggled)

        lay = QVBoxLayout(self)
        lay.addWidget(self.button, alignment=Qt.AlignCenter)
    def handle_toggled(self, state):
        if state:
            fs = 16000
            frequency = np.random.uniform(low=0, high=10000)
            # generate and play the tone with matching sample rate and channel count
            tone = create_tone(60, fs, frequency)
            self.audio_manager.start(tone, 2, 2, fs)
        else:
            self.audio_manager.stop()
            self.button.setText("Start")

    def handle_started(self):
        self.button.setChecked(True)
        self.button.setText("Stop")

    def handle_finished(self):
        self.button.setChecked(False)
        self.button.setText("Start")


def main():
    import sys

    app = QApplication(sys.argv)
    widget = Widget()
    widget.resize(640, 480)
    widget.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
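To connect this back to the question's chunk sequence: instead of looping over test_function in a worker, each chunk can be started when the previous one reports finished. The helper below is only a sketch, not part of the answer above; ChunkPlayer and its method names are hypothetical, and it assumes it lives next to the AudioManager code above and the MusicReactor from the question.

class ChunkPlayer(QObject):
    # Hypothetical helper: feeds the chunks yielded by MusicReactor.test_function
    # to an AudioManager one at a time, entirely on the GUI thread.
    all_done = pyqtSignal()

    def __init__(self, reactor, parent=None):
        super().__init__(parent)
        self.reactor = reactor
        self.audio_manager = AudioManager(self)
        # when one chunk finishes, start the next one
        self.audio_manager.finished.connect(self._next_chunk)
        self._chunks = None

    def start(self, frequency):
        self._chunks = self.reactor.test_function(frequency)
        self._next_chunk()

    def stop(self):
        # connect ML_yesButton.clicked to this slot: drop the remaining chunks
        self._chunks = None
        self.audio_manager.stop()

    def _next_chunk(self):
        if self._chunks is None:
            return
        try:
            chunk, volume = next(self._chunks)
        except StopIteration:
            self._chunks = None
            self.all_done.emit()
            return
        # 2 channels and 2 bytes per sample, as in the original playTone call
        self.audio_manager.start(chunk, 2, 2, self.reactor.frequencySample)

In HearingTest_ui this would replace the QThread/ML_Worker machinery: connect ML_yesButton.clicked to ChunkPlayer.stop, and advance the test counter and progress bar from the all_done signal.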
Answered By - eyllanesc