bugfix

parent cc73b12301
commit 7000471bbe

rtcllm/rec.py · 70 lines · new file

@@ -0,0 +1,70 @@
import pyaudio
import av
import numpy as np
from utils import frames_write_wave
import sys
import select
from vad import MyVad, bytes2frame, to16000_160_frames

def check_input():
    # Non-blocking poll of stdin: return the stripped line if one is ready, else None
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_str = sys.stdin.readline().strip()
        return input_str
    else:
        return None

def mic(seconds=None, frames_ms=20):
    # PyAudio setup
    p = pyaudio.PyAudio()

    # Audio format
    FORMAT = pyaudio.paInt16     # 16-bit resolution
    CHANNELS = 1                 # 1 channel for mono
    RATE = 16000                 # 16 kHz sampling rate
    CHUNK = int(RATE / 100) * 2  # 320 samples (20 ms) per read

    # PyAudio input stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("Recording and streaming audio...")
    if seconds is not None:
        cnt = int(seconds * 1000 / frames_ms)
    else:
        cnt = 0
    i = 0
    while True:
        if cnt == 0:
            # No duration given: record until a 'q' is typed on stdin
            if check_input() == 'q':
                break
        else:
            if i >= cnt:
                break
        # Read audio frames from the microphone
        data = stream.read(CHUNK)
        # print(f'{data.__class__.__name__}, {len(data)=}, {CHUNK=}')
        frames = to16000_160_frames(bytes2frame(data))
        for frame in frames:
            yield frame
        i += 1

    # Close the audio stream and terminate PyAudio
    stream.close()
    p.terminate()

if __name__ == '__main__':
    def cb(f):
        print(f'{f} voice wave file')

    i = 0
    vad = MyVad(callback=cb)
    for f in mic():
        if i == 0:
            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=}')
        i += 1
        vad.vad_check(f)

    print(f'record save to {f}')
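A note on the frame arithmetic in mic(): one stream.read(CHUNK) pulls 320 samples (20 ms at 16 kHz), which the VAD layer slices into two 160-sample frames, the 10 ms frame length webrtcvad accepts. A minimal sketch of that arithmetic, with all numbers taken from the code above:

    RATE = 16000                 # samples per second, as in mic()
    CHUNK = int(RATE / 100) * 2  # 320 samples per stream.read()

    read_ms = CHUNK * 1000 / RATE                # 20 ms of audio per read
    bytes_per_read = CHUNK * 2                   # paInt16 is 2 bytes/sample -> 640 bytes
    vad_frames_per_read = bytes_per_read // 320  # two 160-sample (10 ms) VAD frames

    assert read_ms == 20 and vad_frames_per_read == 2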
rtcllm/utils.py · 64 lines · new file

@@ -0,0 +1,64 @@
import pyaudio
import av
from av import AudioFrame
import numpy as np

from appPublic.folderUtils import temp_file

# Recording parameters

def MonoMircoPhone():
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    RECORD_SECONDS = 10

    # Initialize PyAudio
    p = pyaudio.PyAudio()

    # Open the input audio stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("Recording started...")
    frames = []

    for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        ndarr = np.frombuffer(data, dtype=np.int16)
        print(ndarr.shape)
        c = ndarr.shape[0]
        # Packed s16 mono expects shape (1, samples); reshape returns a new
        # array, so the result must be assigned back
        ndarr = ndarr.reshape(1, c)
        print(ndarr.shape)

        frame = AudioFrame.from_ndarray(ndarr, format='s16', layout='mono')
        frame.sample_rate = RATE
        yield frame
        print(f'{type(data)}')
        frames.append(data)

    print("Recording finished.")

    # Stop and close the audio stream
    stream.stop_stream()
    stream.close()
    p.terminate()

def frames_write_wave(frames):
    path = temp_file(suffix='.wav')
    output_container = av.open(path, 'w')
    out_stream = output_container.add_stream('pcm_s16le')
    for frame in frames:
        for packet in out_stream.encode(frame):
            output_container.mux(packet)
    # Flush the encoder
    for packet in out_stream.encode(None):
        output_container.mux(packet)
    output_container.close()
    return path

if __name__ == '__main__':
    frames = [f for f in MonoMircoPhone()]
    frames_write_wave(frames)
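The reshape above matters: PyAV's packed s16 format expects an ndarray of shape (1, samples) for mono input to AudioFrame.from_ndarray. A minimal sketch of building a frame from synthetic samples (the 440 Hz tone is illustrative only):

    import numpy as np
    from av import AudioFrame

    # 1024 int16 samples of a 440 Hz tone at 44100 Hz
    samples = (np.sin(2 * np.pi * 440 * np.arange(1024) / 44100) * 3000).astype(np.int16)
    # Packed mono s16 wants shape (1, n_samples); a flat array is rejected
    frame = AudioFrame.from_ndarray(samples.reshape(1, -1), format='s16', layout='mono')
    frame.sample_rate = 44100
    print(frame.samples)  # 1024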
rtcllm/vad.py · 252 lines

@@ -12,71 +12,76 @@ from scipy.io.wavfile import write
 import numpy as np
 import av
 from av import AudioLayout, AudioResampler, AudioFrame, AudioFormat
+from utils import frames_write_wave

-class AudioTrackVad(MediaStreamTrack):
-    def __init__(self, track, stage=3, onvoiceend=None):
-        super().__init__()
-        self.track = track
-        self.onvoiceend = onvoiceend
-        self.vad = webrtcvad.Vad(stage)
-        # self.sample_rate = self.track.getSettings().sampleRate
-        # frameSize = self.track.getSettings().frameSize
-        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
-        self.frame_duration_ms = 0.02
-        self.num_padding_frames = 20
-        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
-        self.triggered = False
-        self.voiced_frames = []
-        self.loop = asyncio.get_event_loop()
-        self.task = None
-        self.debug = True
-        self.running = False
-
-    def start_vad(self):
-        self.running = True
-        self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
-
-    def _recv(self):
-        asyncio.create_task(self.recv())
-
-    def stop(self):
-        self.running = False
-
-    def frame2bytes(self, frame):
-        audio_array = frame.to_ndarray()
-        dtype = audio_array.dtype
-        audio_bytes = audio_array.tobytes()
-        return audio_bytes
-
-    async def recv(self):
-        frame = await self.track.recv()
-        self.sample_rate = frame.sample_rate
-        duration = (frame.samples * 1000) / frame.sample_rate
-        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
-        try:
-            await self.vad_check(frame)
-        except Exception as e:
-            print(f'{e=}')
-            print_exc()
-            return
-        if self.task:
-            self.task.cancel()
-        if self.running:
-            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
-        return frame
-
-    def resample(self, frame, sample_rate=None):
-        if sample_rate is None:
-            sample_rate = frame.rate
-        r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
-        frame = r.resample(frame)
-        return frame
-
-    async def vad_check(self, inframe):
-        frames = self.resample(inframe)
-        frame = frames[0]
-        is_speech = self.vad.is_speech(self.frame2bytes(frame),
-                    self.sample_rate)
+def to16000_160_frames(frame):
+    # Resample to 16 kHz mono and re-chunk into 160-sample (10 ms) frames
+    remind_byts = b''
+    frames = resample(frame, sample_rate=16000)
+    ret_frames = []
+    for f in frames:
+        if f.samples == 160:
+            return frames
+    for f in frames:
+        b1 = remind_byts + frame2bytes(f)
+        while len(b1) >= 320:
+            b = b1[:320]
+            b1 = b1[320:]
+            ret_frames.append(bytes2frame(b))
+        remind_byts = b1
+    return ret_frames
+
+def bytes2frame(byts, channels=1, sample_rate=16000):
+    audio_data = np.frombuffer(byts, np.int16)
+    audio_data = audio_data.reshape((channels, -1))
+    layout = 'mono'
+    if channels == 2:
+        layout = 'stereo'
+    # Create an AV frame from the audio data
+    frame = av.AudioFrame.from_ndarray(audio_data, format='s16', layout=layout)
+    frame.sample_rate = sample_rate
+    return frame
+
+def frame2bytes(frame):
+    audio_array = frame.to_ndarray()
+    audio_bytes = audio_array.tobytes()
+    return audio_bytes
+
+def resample(frame, sample_rate=None):
+    if sample_rate is None:
+        sample_rate = frame.rate
+    r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
+    frame = r.resample(frame)
+    return frame
+
+class MyVad(webrtcvad.Vad):
+    def __init__(self, callback=None):
+        super().__init__(3)
+        self.voiced_frames = []
+        self.num_padding_frames = 40
+        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
+        self.onvoiceend = callback
+        self.triggered = False
+        self.cnt = 0
+
+    def voice_duration(self):
+        duration = 0
+        for f in self.voiced_frames:
+            duration = f.samples * 1000 / f.sample_rate + duration
+        return duration
+
+    def vad_check(self, inframe):
+        """
+        ONLY SUPPORTS frames with sample_rate == 16000 and samples == 160
+        """
+        frame = inframe
+        byts = frame2bytes(frame)
+        if self.cnt == 0:
+            f = frame
+            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=}, {len(byts)=}')
+            if not webrtcvad.valid_rate_and_frame_length(frame.sample_rate, frame.samples):
+                print('invalid sample_rate/frame length for webrtcvad')
+        is_speech = self.is_speech(byts, frame.sample_rate, length=frame.samples)
         if not self.triggered:
             self.ring_buffer.append((inframe, is_speech))
             num_voiced = len([f for f, speech in self.ring_buffer if speech])
@@ -104,102 +109,73 @@ class AudioTrackVad(MediaStreamTrack):
             if num_unvoiced > 0.9 * self.ring_buffer.maxlen:
                 self.triggered = False
                 duration = self.voice_duration()
-                if duration > 500 and self.onvoiceend:
-                    ret = await self.write_wave()
-                    await self.onvoiceend(ret)
+                if duration > 500:
+                    ret = frames_write_wave(self.voiced_frames)
+                    if self.onvoiceend:
+                        self.onvoiceend(ret)
                 else:
-                    print(f'vad sound {duration=}')
+                    print('-----short voice------')
                 self.ring_buffer.clear()
                 self.voiced_frames = []
+        self.cnt += 1

-    def to_mono16000_data(self):
-        lst = []
-        for f in self.voiced_frames:
-            fs = self.resample(f, sample_rate=16000)
-            lst += fs
-        audio_data = b''.join([self.frame2bytes(f) for f in lst])
-        return audio_data
-
-    async def gen_base64(self):
-        audio_data = self.to_mono16000_data()
-        b64 = base64.b64encode(audio_data).decode('utf-8')
-        return b64
-
-    def voice_duration(self):
-        duration = 0
-        for f in self.voiced_frames:
-            duration = f.samples * 1000 / f.sample_rate + duration
-        return duration
-
-    def frames_resample(self, frames, sr=None):
-        fs = []
-        for f in frames:
-            fs1 = self.resample(f, sample_rate=sr)
-            fs += fs1
-        return fs
-
-    def frames_write_wave(self, frames):
-        """
-        fb = [ self.frame2bytes(f) for f in frames ]
-        ndarr = np.frombuffer(b''.join(fb), dtype=np.int16)
-        fn = temp_file(suffix='.wav')
-        write(fn, frames[0].sample_rate, ndarr)
-        return fn
-        """
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le')
-        for frame in frames:
-            for packet in out_stream.encode(frame):
-                output_container.mux(packet)
-        for packet in out_stream.encode(None):
-            output_container.mux(packet)
-        output_container.close()
-        return path
-
-    async def write_wave(self):
-        """Writes a .wav file.
-        Takes path, PCM audio data, and sample rate.
-        """
-        """
-        ############
-        # Method:1
-        ############
-        audio_data = self.to_mono16000_data()
-        path = temp_file(suffix='.wav')
-        # print(f'temp_file={path}')
-        with contextlib.closing(wave.open(path, 'wb')) as wf:
-            wf.setnchannels(1)
-            wf.setsampwidth(2)
-            wf.setframerate(16000)
-            wf.writeframes(audio_data)
-        # print('************wrote*******')
-        if self.onvoiceend:
-            await self.onvoiceend(path)
-        # print('************over*******')
-        return
-        ############
-        # Method:2
-        ############
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le', rate=16000, layout='mono')
-        resampler = AudioResampler(format=out_stream.format, layout=out_stream.layout, rate=out_stream.rate)
-        for frame in self.voiced_frames:
-            for f in resampler.resample(frame):
-                output_container.mux(out_stream.encode(f))
-            output_container.mux(out_stream.encode())
-        output_container.close()
-        return path
-        """
-        f1 = self.frames_write_wave(self.voiced_frames)
-        frames = self.frames_resample(self.voiced_frames, sr=16000)
-        fn = self.frames_write_wave(frames)
-        print(f'source wave filename={f1}, mono 16000 wave filename={fn}')
-        return fn
+class AudioTrackVad(MediaStreamTrack):
+    def __init__(self, track, stage=3, onvoiceend=None):
+        super().__init__()
+        self.track = track
+        self.vad = MyVad(callback=onvoiceend)
+        # self.sample_rate = self.track.getSettings().sampleRate
+        # frameSize = self.track.getSettings().frameSize
+        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
+        self.frame_duration_ms = 0.02
+        self.remind_byts = b''
+        self.loop = asyncio.get_event_loop()
+        self.task = None
+        self.debug = True
+        self.running = False
+
+    def start_vad(self):
+        self.running = True
+        self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+
+    def _recv(self):
+        asyncio.create_task(self.recv())
+
+    def stop(self):
+        self.running = False
+
+    def to16000_160_frames(self, frame):
+        frames = resample(frame, sample_rate=16000)
+        ret_frames = []
+        for f in frames:
+            if f.samples == 160:
+                return frames
+        for f in frames:
+            b1 = self.remind_byts + frame2bytes(f)
+            while len(b1) >= 320:
+                b = b1[:320]
+                b1 = b1[320:]
+                ret_frames.append(bytes2frame(b))
+            self.remind_byts = b1
+        return ret_frames
+
+    async def recv(self):
+        frame = await self.track.recv()
+        self.sample_rate = frame.sample_rate
+        duration = (frame.samples * 1000) / frame.sample_rate
+        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
+        try:
+            frames = self.to16000_160_frames(frame)
+            for frame in frames:
+                self.vad.vad_check(frame)
+        except Exception as e:
+            print(f'{e=}')
+            print_exc()
+            return
+        if self.task:
+            self.task.cancel()
+        if self.running:
+            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+        return frame
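For reference, the new MyVad path can be exercised without a microphone by synthesizing 160-sample silent frames and running them through vad_check; silence should never trigger the voice-end callback. A minimal sketch, assuming vad.py and its dependencies (webrtcvad, av, numpy) are importable:

    from vad import MyVad, bytes2frame

    def on_voice(path):
        print(f'voice segment written to {path}')

    vad = MyVad(callback=on_voice)

    # 100 frames of silence: 320 zero bytes = 160 int16 samples = 10 ms at 16 kHz
    for _ in range(100):
        frame = bytes2frame(b'\x00' * 320, channels=1, sample_rate=16000)
        vad.vad_check(frame)  # silence: triggered stays False, callback never fires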