diff --git a/rtcllm/vad.py b/rtcllm/vad.py
index f687e51..6d627cb 100644
--- a/rtcllm/vad.py
+++ b/rtcllm/vad.py
@@ -50,8 +50,8 @@ class AudioTrackVad(MediaStreamTrack):
         oldf = await self.track.recv()
         self.sample_rate = oldf.sample_rate
         duration = (oldf.samples * 1000) / oldf.sample_rate
-        print(f'{self.__class__.__name__}.recv(): {duration=}, {oldf.samples=}, {oldf.sample_rate=}')
-        frames = self.resample(oldf)
+        # print(f'{self.__class__.__name__}.recv(): {duration=}, {oldf.samples=}, {oldf.sample_rate=}')
+        try:
             for f in frames:
                 if self.debug:
                     self.debug = False
@@ -75,10 +75,13 @@ class AudioTrackVad(MediaStreamTrack):
             frame = r.resample(frame)
         return frame
 
-    async def vad_check(self, frame):
-        is_speech = self.vad.is_speech(self.frame2bytes(frame), self.sample_rate)
+    async def vad_check(self, inframe):
+        frames = self.resample(inframe)
+        frame = frames[0]
+        is_speech = self.vad.is_speech(self.frame2bytes(frame),
+                self.sample_rate)
         if not self.triggered:
-            self.ring_buffer.append((frame, is_speech))
+            self.ring_buffer.append((inframe, is_speech))
             num_voiced = len([f for f, speech in self.ring_buffer if speech])
             # If we're NOTTRIGGERED and more than 90% of the frames in
             # the ring buffer are voiced frames, then enter the
@@ -95,7 +98,7 @@ class AudioTrackVad(MediaStreamTrack):
         else:
             # We're in the TRIGGERED state, so collect the audio data
             # and add it to the ring buffer.
-            self.voiced_frames.append(frame)
+            self.voiced_frames.append(inframe)
             self.ring_buffer.append((frame, is_speech))
             num_unvoiced = len([f for f, speech in self.ring_buffer if not speech])
             # If more than 90% of the frames in the ring buffer are
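
For context, vad_check() follows the ring-buffer trigger/detrigger pattern from the py-webrtcvad example. Below is a minimal standalone sketch of that logic, assuming webrtcvad's Vad.is_speech() API and 16-bit mono PCM input; the sample rate, frame duration, buffer length, threshold, and aggressiveness mode are illustrative assumptions, not values taken from rtcllm/vad.py.

    # Minimal sketch of the ring-buffer VAD trigger logic (assumed parameters).
    import collections
    import webrtcvad

    SAMPLE_RATE = 16000   # assumed; webrtcvad accepts 8/16/32/48 kHz only
    RING_LEN = 10         # assumed ring-buffer capacity (10 x 30 ms = 300 ms)
    THRESHOLD = 0.9       # "more than 90%" per the comments in the diff

    vad = webrtcvad.Vad(3)  # aggressiveness 0-3; 3 filters non-speech hardest
    ring_buffer = collections.deque(maxlen=RING_LEN)
    triggered = False
    voiced_frames = []

    def vad_step(frame_bytes):
        """Feed one 10/20/30 ms frame of raw PCM; collect frames while speech is active."""
        global triggered
        is_speech = vad.is_speech(frame_bytes, SAMPLE_RATE)
        if not triggered:
            ring_buffer.append((frame_bytes, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # Enter TRIGGERED once >90% of buffered frames are voiced, and
            # replay the buffered frames so the start of speech is kept.
            if num_voiced > THRESHOLD * ring_buffer.maxlen:
                triggered = True
                voiced_frames.extend(f for f, _ in ring_buffer)
                ring_buffer.clear()
        else:
            # In the TRIGGERED state, collect audio and watch for silence.
            voiced_frames.append(frame_bytes)
            ring_buffer.append((frame_bytes, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # Leave TRIGGERED once >90% of buffered frames are unvoiced.
            if num_unvoiced > THRESHOLD * ring_buffer.maxlen:
                triggered = False
                ring_buffer.clear()

This also explains the shape of the vad_check() change above: webrtcvad only accepts 8/16/32/48 kHz PCM, so the patch resamples inside vad_check() to feed is_speech(), while buffering the original inframe so the captured audio keeps its source format.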