bugfix
This commit is contained in:
parent cc73b12301
commit 7000471bbe

70  rtcllm/rec.py  Normal file
@@ -0,0 +1,70 @@
import pyaudio
import av
import numpy as np
from utils import frames_write_wave
import sys
import select
from vad import MyVad, bytes2frame, to16000_160_frames


def check_input():
    # Non-blocking poll of stdin; returns the pending line, or None
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_str = sys.stdin.readline().strip()
        return input_str
    else:
        return None


def mic(seconds=None, frames_ms=20):
    # PyAudio setup
    p = pyaudio.PyAudio()

    # Audio format
    FORMAT = pyaudio.paInt16     # 16-bit resolution
    CHANNELS = 1                 # 1 channel for mono
    RATE = 16000                 # 16 kHz sampling rate
    CHUNK = int(RATE / 100) * 2  # 320 samples per read (20 ms at 16 kHz)

    # PyAudio input stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("Recording and streaming audio...")
    if seconds is not None:
        cnt = int(seconds * 1000 / frames_ms)
    else:
        cnt = 0
    i = 0
    while True:
        if cnt == 0:
            # Unbounded recording: stop when 'q' is entered on stdin
            if check_input() == 'q':
                break
        else:
            if i >= cnt:
                break
        # Read one chunk from the microphone and split it into
        # 16 kHz / 160-sample frames for the VAD
        data = stream.read(CHUNK)
        # print(f'{data.__class__.__name__}, {len(data)=}, {CHUNK=}')
        frames = to16000_160_frames(bytes2frame(data))
        for frame in frames:
            yield frame
        i += 1

    # Close the audio stream and terminate PyAudio
    stream.close()
    p.terminate()


if __name__ == '__main__':
    def cb(f):
        print(f'{f} voice wave file')

    i = 0
    vad = MyVad(callback=cb)
    for f in mic():
        if i == 0:
            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=}')
        i += 1
        vad.vad_check(f)

    print(f'record saved to {f}')
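A note on the chunk math above: CHUNK = int(RATE / 100) * 2 is 320 samples, i.e. 20 ms at 16 kHz, and to16000_160_frames() splits each read into two 160-sample (10 ms) frames. webrtcvad only accepts 10, 20 or 30 ms frames at 8, 16, 32 or 48 kHz. A quick sketch to confirm the arithmetic (assuming py-webrtcvad is installed; valid_rate_and_frame_length is its public helper, also used in vad.py below):

import webrtcvad

RATE = 16000
CHUNK = int(RATE / 100) * 2   # 320 samples = 20 ms at 16 kHz
assert CHUNK == 2 * 160       # each read yields two 10 ms VAD frames

# valid frame lengths at 16 kHz: 160 (10 ms), 320 (20 ms), 480 (30 ms)
for samples in (160, 320, 480):
    print(samples, webrtcvad.valid_rate_and_frame_length(RATE, samples))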

64  rtcllm/utils.py  Normal file
@@ -0,0 +1,64 @@
import pyaudio
import av
from av import AudioFrame
import numpy as np

from appPublic.folderUtils import temp_file


# Recording parameters

def MonoMircoPhone():
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    RECORD_SECONDS = 10

    # Initialize PyAudio
    p = pyaudio.PyAudio()

    # Open the input audio stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("Recording started...")
    frames = []

    for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        ndarr = np.frombuffer(data, dtype=np.int16)
        print(ndarr.shape)
        c = ndarr.shape[0]
        # packed s16/mono frames expect shape (1, samples)
        ndarr = ndarr.reshape(1, c)
        print(ndarr.shape)

        frame = AudioFrame.from_ndarray(ndarr, format='s16', layout='mono')
        frame.sample_rate = RATE
        yield frame
        print(f'{type(data)}')
        frames.append(data)

    print("Recording finished.")

    # Stop and close the audio stream
    stream.stop_stream()
    stream.close()
    p.terminate()


def frames_write_wave(frames):
    path = temp_file(suffix='.wav')
    output_container = av.open(path, 'w')
    out_stream = output_container.add_stream('pcm_s16le', rate=frames[0].sample_rate)
    for frame in frames:
        for packet in out_stream.encode(frame):
            output_container.mux(packet)
    # flush the encoder
    for packet in out_stream.encode(None):
        output_container.mux(packet)
    output_container.close()
    return path


if __name__ == '__main__':
    frames = [f for f in MonoMircoPhone()]
    frames_write_wave(frames)
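If you want to check what frames_write_wave() actually produced, a minimal readback with the stdlib wave module is enough. A sketch (describe_wav is a hypothetical helper, not part of this commit):

import wave

def describe_wav(path):
    # Print channel count, sample width, rate and duration of a .wav file
    with wave.open(path, 'rb') as wf:
        seconds = wf.getnframes() / wf.getframerate()
        print(f'{wf.getnchannels()} ch, {8 * wf.getsampwidth()}-bit, '
              f'{wf.getframerate()} Hz, {seconds:.2f} s')

# e.g. describe_wav(frames_write_wave(frames))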

278  rtcllm/vad.py
@@ -12,71 +12,76 @@ from scipy.io.wavfile import write
 import numpy as np
 import av
 from av import AudioLayout, AudioResampler, AudioFrame, AudioFormat
+from utils import frames_write_wave

-class AudioTrackVad(MediaStreamTrack):
-    def __init__(self, track, stage=3, onvoiceend=None):
-        super().__init__()
-        self.track = track
-        self.onvoiceend = onvoiceend
-        self.vad = webrtcvad.Vad(stage)
-        # self.sample_rate = self.track.getSettings().sampleRate
-        # frameSize = self.track.getSettings().frameSize
-        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
-        self.frame_duration_ms = 0.02
-        self.num_padding_frames = 20
-        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
-        self.triggered = False
+def to16000_160_frames(frame):
+    # Resample to 16 kHz mono, then re-chunk into 160-sample (10 ms) frames
+    remind_byts = b''
+    frames = resample(frame, sample_rate=16000)
+    ret_frames = []
+    for f in frames:
+        if f.samples == 160:
+            return frames
+    for f in frames:
+        b1 = remind_byts + frame2bytes(f)
+        while len(b1) >= 320:
+            b = b1[:320]
+            b1 = b1[320:]
+            ret_frames.append(bytes2frame(b))
+        remind_byts = b1
+    return ret_frames
+
+def bytes2frame(byts, channels=1, sample_rate=16000):
+    audio_data = np.frombuffer(byts, np.int16)
+    audio_data = audio_data.reshape((channels, -1))
+    layout = 'mono'
+    if channels == 2:
+        layout = 'stereo'
+    # Create an AV frame from the audio data
+    frame = av.AudioFrame.from_ndarray(audio_data, format='s16', layout=layout)
+    frame.sample_rate = sample_rate
+    return frame
+
+def frame2bytes(frame):
+    audio_array = frame.to_ndarray()
+    audio_bytes = audio_array.tobytes()
+    return audio_bytes
+
+def resample(frame, sample_rate=None):
+    if sample_rate is None:
+        sample_rate = frame.rate
+    r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
+    frame = r.resample(frame)
+    return frame
+
+class MyVad(webrtcvad.Vad):
+    def __init__(self, callback=None):
+        super().__init__(3)
+        self.voiced_frames = []
+        self.loop = asyncio.get_event_loop()
+        self.task = None
+        self.debug = True
+        self.running = False
+        self.num_padding_frames = 40
+        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
+        self.onvoiceend = callback
+        self.triggered = False
+        self.cnt = 0
+        self.frame_duration_ms = 0.02  # needed by the start_vad()/_recv() timer path

     def start_vad(self):
         self.running = True
         self.task = self.loop.call_later(self.frame_duration_ms, self._recv)

     def _recv(self):
         asyncio.create_task(self.recv())

     def voice_duration(self):
         duration = 0
         for f in self.voiced_frames:
             duration = f.samples * 1000 / f.sample_rate + duration
         return duration

     def stop(self):
         self.running = False

     def frame2bytes(self, frame):
         audio_array = frame.to_ndarray()
         audio_bytes = audio_array.tobytes()
         return audio_bytes

     async def recv(self):
         frame = await self.track.recv()
         self.sample_rate = frame.sample_rate
         duration = (frame.samples * 1000) / frame.sample_rate
         # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
         try:
             await self.vad_check(frame)
         except Exception as e:
             print(f'{e=}')
             print_exc()
             return
         if self.task:
             self.task.cancel()
         if self.running:
             self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
         return frame

     def resample(self, frame, sample_rate=None):
         if sample_rate is None:
             sample_rate = frame.rate
         r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
         frame = r.resample(frame)
         return frame

-    async def vad_check(self, inframe):
-        frames = self.resample(inframe)
-        frame = frames[0]
-        is_speech = self.vad.is_speech(self.frame2bytes(frame),
-            self.sample_rate)
+    def vad_check(self, inframe):
+        """
+        ONLY SUPPORTS frames with sample_rate == 16000 and samples == 160
+        """
+        frame = inframe
+        byts = frame2bytes(frame)
+        if self.cnt == 0:
+            f = frame
+            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=}, {len(byts)=}')
+        if not webrtcvad.valid_rate_and_frame_length(frame.sample_rate, frame.samples):
+            print(f'unsupported rate/frame length: {frame.sample_rate=}, {frame.samples=}')
+        is_speech = self.is_speech(byts, frame.sample_rate, length=frame.samples)
         if not self.triggered:
             self.ring_buffer.append((inframe, is_speech))
             num_voiced = len([f for f, speech in self.ring_buffer if speech])
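The trigger logic around this point (continued in the next hunk) is the usual webrtcvad ring-buffer hysteresis: start collecting voiced frames once most of a sliding window is speech, and close the segment once most of it is silence. A stripped-down sketch of the idea in plain Python, with no audio involved; the window size and 0.9 ratio mirror the values used in MyVad:

import collections

def hysteresis(flags, maxlen=40, ratio=0.9):
    # flags: per-frame booleans (speech / not speech).
    # Prints events the way MyVad.vad_check triggers: on when the window
    # is mostly voiced, off when it is mostly unvoiced.
    ring = collections.deque(maxlen=maxlen)
    triggered = False
    for i, is_speech in enumerate(flags):
        ring.append(is_speech)
        voiced = sum(ring)
        if not triggered and voiced > ratio * ring.maxlen:
            triggered = True
            print('voice start near frame', i)
            ring.clear()
        elif triggered and (len(ring) - voiced) > ratio * ring.maxlen:
            triggered = False
            print('voice end near frame', i)
            ring.clear()

# e.g. hysteresis([False] * 50 + [True] * 100 + [False] * 100)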
@@ -104,102 +109,73 @@ class AudioTrackVad(MediaStreamTrack):
             if num_unvoiced > 0.9 * self.ring_buffer.maxlen:
                 self.triggered = False
                 duration = self.voice_duration()
-                if duration > 500 and self.onvoiceend:
-                    ret = await self.write_wave()
-                    await self.onvoiceend(ret)
+                if duration > 500:
+                    ret = frames_write_wave(self.voiced_frames)
+                    if self.onvoiceend:
+                        self.onvoiceend(ret)
                 else:
                     print(f'vad sound {duration=}')
+                    print('-----short voice------')

                 self.ring_buffer.clear()
                 self.voiced_frames = []
+        self.cnt += 1

-    def to_mono16000_data(self):
-        lst = []
-        for f in self.voiced_frames:
-            fs = self.resample(f, sample_rate=16000)
-            lst += fs
-        audio_data = b''.join([self.frame2bytes(f) for f in lst])
-        return audio_data
-
-    async def gen_base64(self):
-        audio_data = self.to_mono16000_data()
-        b64 = base64.b64encode(audio_data).decode('utf-8')
-        return b64
+class AudioTrackVad(MediaStreamTrack):
+    def __init__(self, track, stage=3, onvoiceend=None):
+        super().__init__()
+        self.track = track
+        self.vad = MyVad(callback=onvoiceend)
+        # self.sample_rate = self.track.getSettings().sampleRate
+        # frameSize = self.track.getSettings().frameSize
+        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
+        self.frame_duration_ms = 0.02
+        self.remind_byts = b''
+        self.loop = asyncio.get_event_loop()
+        self.task = None
+        self.debug = True
+        self.running = False
+
+    def start_vad(self):
+        self.running = True
+        self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+
+    def voice_duration(self):
+        duration = 0
+        for f in self.voiced_frames:
+            duration = f.samples * 1000 / f.sample_rate + duration
+        return duration
+
+    def _recv(self):
+        asyncio.create_task(self.recv())

-    def frames_resample(self, frames, sr=None):
-        fs = []
+    def stop(self):
+        self.running = False

+    def to16000_160_frames(self, frame):
+        # Same re-chunking as the module-level helper, but carries leftover
+        # bytes across calls in self.remind_byts
+        frames = resample(frame, sample_rate=16000)
+        ret_frames = []
         for f in frames:
-            fs1 = self.resample(f, sample_rate=sr)
-            fs += fs1
-        return fs
+            if f.samples == 160:
+                return frames
+        for f in frames:
+            b1 = self.remind_byts + frame2bytes(f)
+            while len(b1) >= 320:
+                b = b1[:320]
+                b1 = b1[320:]
+                ret_frames.append(bytes2frame(b))
+            self.remind_byts = b1
+        return ret_frames

-    def frames_write_wave(self, frames):
-        """
-        fb = [ self.frame2bytes(f) for f in frames ]
-        ndarr = np.frombuffer(b''.join(fb), dtype=np.int16)
-        fn = temp_file(suffix='.wav')
-        write(fn, frames[0].sample_rate, ndarr)
-        return fn
-        """
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le')
-        for frame in frames:
-            for packet in out_stream.encode(frame):
-                output_container.mux(packet)
-        for packet in out_stream.encode(None):
-            output_container.mux(packet)
-        output_container.close()
-        return path
-
-    async def write_wave(self):
-        """Writes a .wav file.
-
-        Takes path, PCM audio data, and sample rate.
-        """
-        """
-        ############
-        # Method:1
-        ############
-        audio_data = self.to_mono16000_data()
-        path = temp_file(suffix='.wav')
-        # print(f'temp_file={path}')
-
-        with contextlib.closing(wave.open(path, 'wb')) as wf:
-            wf.setnchannels(1)
-            wf.setsampwidth(2)
-            wf.setframerate(16000)
-            wf.writeframes(audio_data)
-
-        # print('************wrote*******')
-        if self.onvoiceend:
-            await self.onvoiceend(path)
-        # print('************over*******')
-        return
-        ############
-        # Method:2
-        ############
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le', rate=16000, layout='mono')
-        resampler = AudioResampler(format=out_stream.format, layout=out_stream.layout, rate=out_stream.rate)
-        for frame in self.voiced_frames:
-            for f in resampler.resample(frame):
-                output_container.mux(out_stream.encode(f))
-        output_container.mux(out_stream.encode())
-        output_container.close()
-        return path
-        """
-        f1 = self.frames_write_wave(self.voiced_frames)
-        frames = self.frames_resample(self.voiced_frames, sr=16000)
-        fn = self.frames_write_wave(frames)
-        print(f'source wave filename={f1}, mono 16000 wave filename={fn}')
-        return fn

-    async def recv(self):
-        frame = await self.track.recv()
-        self.sample_rate = frame.sample_rate
-        duration = (frame.samples * 1000) / frame.sample_rate
-        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
-        try:
-            await self.vad_check(frame)
-        except Exception as e:
-            print(f'{e=}')
-            print_exc()
-            return
-        if self.task:
-            self.task.cancel()
-        if self.running:
-            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
-        return frame
+    async def recv(self):
+        frame = await self.track.recv()
+        self.sample_rate = frame.sample_rate
+        duration = (frame.samples * 1000) / frame.sample_rate
+        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
+        try:
+            frames = self.to16000_160_frames(frame)
+            for frame in frames:
+                self.vad.vad_check(frame)
+        except Exception as e:
+            print(f'{e=}')
+            print_exc()
+            return
+        if self.task:
+            self.task.cancel()
+        if self.running:
+            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+        return frame
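To exercise the new MyVad without a microphone or a WebRTC track, you can feed it synthetic 160-sample frames built with bytes2frame(). A rough sketch under a few assumptions: vad.py is importable, MyVad's constructor can obtain an asyncio event loop, and the callback fires only if webrtcvad actually classifies enough frames as speech (a pure tone may never trigger it):

import numpy as np
from vad import MyVad, bytes2frame

def tone_frame(i, freq=440.0, amp=8000, sr=16000, n=160):
    # One 10 ms frame of a sine tone, phase-continuous across frame index i
    t = (np.arange(n) + i * n) / sr
    pcm = (amp * np.sin(2 * np.pi * freq * t)).astype(np.int16)
    return bytes2frame(pcm.tobytes(), sample_rate=sr)

vad = MyVad(callback=lambda path: print('voice segment ->', path))
for i in range(100):          # one second of audio
    vad.vad_check(tone_frame(i))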