yumoqing 2024-09-14 14:49:32 +08:00
parent 6e923dbc74
commit bc6e9233dd


@@ -72,7 +72,7 @@ class AudioTrackVad(MediaStreamTrack):
     async def vad_check(self, inframe):
         frames = self.resample(inframe)
-        frame = frames[0]:
+        frame = frames[0]
         is_speech = self.vad.is_speech(self.frame2bytes(frame),
                         self.sample_rate)
         if not self.triggered:
@@ -101,10 +101,13 @@ class AudioTrackVad(MediaStreamTrack):
             # audio we've collected.
             if num_unvoiced > 0.9 * self.ring_buffer.maxlen:
                 self.triggered = False
-                ret = await self.write_wave()
-                # ret = await self.gen_base64()
-                if self.onvoiceend:
-                    await self.onvoiceend(ret)
+                duration = self.voice_duration()
+                if duration > 500 and self.onvoiceend:
+                    ret = await self.write_wave()
+                    await self.onvoiceend(ret)
+                else:
+                    print(f'{duration=} {self.onvoiceend=}')
                 self.ring_buffer.clear()
                 self.voiced_frames = []
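For orientation, here is a minimal, self-contained sketch of the new flush behaviour: segments shorter than 500 ms are only logged, longer ones are handed to the callback. The Frame dataclass, flush() helper and on_voice_end() callback are stand-ins for illustration; only the 500 ms threshold, the duration formula and the else-branch print come from the diff itself.

import asyncio
from dataclasses import dataclass

@dataclass
class Frame:
    # stand-in for the audio frame objects held in voiced_frames
    samples: int
    sample_rate: int

def voice_duration(frames):
    # total milliseconds of audio represented by the collected frames
    return sum(f.samples * 1000 / f.sample_rate for f in frames)

async def flush(frames, onvoiceend):
    # mirrors the new branch in vad_check(): only emit segments longer than 500 ms
    duration = voice_duration(frames)
    if duration > 500 and onvoiceend:
        await onvoiceend(f'{duration:.0f} ms segment')  # the real code passes write_wave()'s result
    else:
        print(f'{duration=} {onvoiceend=}')

async def on_voice_end(result):
    print('voice segment ready:', result)

# 20 ms frames at 16 kHz are 320 samples each, so 30 frames form a 600 ms segment
asyncio.run(flush([Frame(320, 16000)] * 30, on_voice_end))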
@@ -121,11 +124,17 @@ class AudioTrackVad(MediaStreamTrack):
         b64 = base64.b64encode(audio_data).decode('utf-8')
         return b64
 
+    def voice_duration(self):
+        # total duration of the collected voiced frames, in milliseconds
+        duration = 0
+        for f in self.voiced_frames:
+            duration += f.samples * 1000 / f.sample_rate
+        return duration
+
     async def write_wave(self):
         """Writes a .wav file.
         Takes path, PCM audio data, and sample rate.
         """
         audio_data = self.to_mono16000_data()
         path = temp_file(suffix='.wav')
         # print(f'temp_file={path}')
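write_wave() itself is only partially visible in this hunk; judging from to_mono16000_data() and temp_file(suffix='.wav'), it appears to dump mono 16 kHz PCM into a temporary .wav file and return its path. Below is a generic standard-library sketch of that idea, not the project's actual implementation; write_wave_bytes() and its parameters are assumptions.

import os
import tempfile
import wave

def write_wave_bytes(pcm_bytes: bytes, sample_rate: int = 16000) -> str:
    # dump mono 16-bit PCM into a temporary .wav file and return its path
    fd, path = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    with wave.open(path, 'wb') as wf:
        wf.setnchannels(1)            # mono, matching to_mono16000_data()
        wf.setsampwidth(2)            # 16-bit samples
        wf.setframerate(sample_rate)
        wf.writeframes(pcm_bytes)
    return path

# half a second of silence: 8000 samples x 2 bytes at 16 kHz
print(write_wave_bytes(b'\x00' * 16000))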