yumoqing 2024-09-18 15:28:01 +08:00
parent cc73b12301
commit 7000471bbe
3 changed files with 261 additions and 151 deletions

rtcllm/rec.py Normal file

@@ -0,0 +1,70 @@
import pyaudio
import av
import numpy as np
from utils import frames_write_wave
import sys
import select
from vad import MyVad, bytes2frame, to16000_160_frames

def check_input():
    # Non-blocking poll of stdin; returns the pending line, or None.
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_str = sys.stdin.readline().strip()
        return input_str
    else:
        return None

def mic(seconds=None, frames_ms=20):
    # PyAudio setup
    p = pyaudio.PyAudio()
    # Audio format
    FORMAT = pyaudio.paInt16        # 16-bit resolution
    CHANNELS = 1                    # 1 channel for mono
    RATE = 16000                    # 16 kHz sampling rate
    CHUNK = int(RATE / 100) * 2     # 320 samples = 20 ms per read
    # PyAudio input stream
    stream = p.open(format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK)
    print("Recording and streaming audio...")
    if seconds is not None:
        cnt = int(seconds * 1000 / frames_ms)
    else:
        cnt = 0
    i = 0
    while True:
        if cnt == 0:
            # No duration given: record until the user types 'q'.
            if check_input() == 'q':
                break
        else:
            if i >= cnt:
                break
        # Read audio frames from the microphone
        data = stream.read(CHUNK)
        # print(f'{data.__class__.__name__}, {len(data)=}, {CHUNK=}')
        frames = to16000_160_frames(bytes2frame(data))
        for frame in frames:
            yield frame
        i += 1
    # Close the audio stream and terminate PyAudio
    stream.close()
    p.terminate()

if __name__ == '__main__':
    def cb(f):
        print(f'{f} voice wave file')
    i = 0
    vad = MyVad(callback=cb)
    for f in mic():
        if i == 0:
            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=} ')
        i += 1
        vad.vad_check(f)
    print(f'record saved to {f}')
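Note: the sizes above can be sanity-checked with a few lines of arithmetic. Each stream.read(CHUNK) returns 20 ms of 16-bit mono audio (640 bytes), and to16000_160_frames() re-slices that into 160-sample (10 ms) frames, one of the frame lengths py-webrtcvad accepts. A minimal sketch, using only the constants defined in mic():

RATE = 16000                        # samples per second, as in mic()
CHUNK = int(RATE / 100) * 2         # 320 samples per stream.read()
read_ms = CHUNK * 1000 // RATE      # 320 / 16000 -> 20 ms of audio per read
bytes_per_read = CHUNK * 2          # 16-bit mono -> 640 bytes per read
vad_frame_ms = 160 * 1000 // RATE   # 160 samples -> 10 ms, valid for webrtcvad
assert (read_ms, bytes_per_read, vad_frame_ms) == (20, 640, 10)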

rtcllm/utils.py Normal file

@@ -0,0 +1,64 @@
import pyaudio
import av
from av import AudioFrame
import numpy as np
from appPublic.folderUtils import temp_file

# Recording parameters
def MonoMircoPhone():
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    RECORD_SECONDS = 10
    # Initialize PyAudio
    p = pyaudio.PyAudio()
    # Open the input audio stream
    stream = p.open(format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK)
    print("Recording started...")
    frames = []
    for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        ndarr = np.frombuffer(data, dtype=np.int16)
        print(ndarr.shape)
        c = ndarr.shape[0]
        # Packed s16 mono expects shape (1, n_samples); reshape() returns a
        # new array, so the result must be assigned back.
        ndarr = ndarr.reshape(1, c)
        print(ndarr.shape)
        frame = AudioFrame.from_ndarray(ndarr, format='s16', layout='mono')
        frame.sample_rate = RATE
        yield frame
        print(f'{type(data)}')
        frames.append(data)
    print("Recording finished.")
    # Stop and close the audio stream
    stream.stop_stream()
    stream.close()
    p.terminate()

def frames_write_wave(frames):
    path = temp_file(suffix='.wav')
    output_container = av.open(path, 'w')
    # Take the sample rate from the frames themselves.
    out_stream = output_container.add_stream('pcm_s16le', rate=frames[0].sample_rate)
    for frame in frames:
        for packet in out_stream.encode(frame):
            output_container.mux(packet)
    # Flush the encoder.
    for packet in out_stream.encode(None):
        output_container.mux(packet)
    output_container.close()
    return path

if __name__ == '__main__':
    frames = [f for f in MonoMircoPhone()]
    frames_write_wave(frames)
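Note: a quick way to check what frames_write_wave() produced is to read the file back with the standard-library wave module. A hedged usage sketch (assumes this module is importable as utils and a microphone is present; not part of the commit):

import wave
from utils import MonoMircoPhone, frames_write_wave

frames = list(MonoMircoPhone())
path = frames_write_wave(frames)
with wave.open(path, 'rb') as wf:
    # Expect 1 channel, 44100 Hz, and roughly 10 seconds of samples.
    print(wf.getnchannels(), wf.getframerate(), wf.getnframes())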

rtcllm/vad.py
@@ -12,71 +12,76 @@ from scipy.io.wavfile import write
 import numpy as np
 import av
 from av import AudioLayout, AudioResampler, AudioFrame, AudioFormat
+from utils import frames_write_wave
+
-class AudioTrackVad(MediaStreamTrack):
-    def __init__(self, track, stage=3, onvoiceend=None):
-        super().__init__()
-        self.track = track
-        self.onvoiceend = onvoiceend
-        self.vad = webrtcvad.Vad(stage)
-        # self.sample_rate = self.track.getSettings().sampleRate
-        # frameSize = self.track.getSettings().frameSize
-        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
-        self.frame_duration_ms = 0.02
-        self.num_padding_frames = 20
-        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
-        self.triggered = False
+def to16000_160_frames(frame):
+    remind_byts = b''
+    frames = resample(frame, sample_rate=16000)
+    ret_frames = []
+    # If the resampler already yields 160-sample frames, pass them through.
+    for f in frames:
+        if f.samples == 160:
+            return frames
+    # Otherwise re-slice the byte stream into 320-byte (160-sample) frames.
+    for f in frames:
+        b1 = remind_byts + frame2bytes(f)
+        while len(b1) >= 320:
+            b = b1[:320]
+            b1 = b1[320:]
+            ret_frames.append(bytes2frame(b))
+        remind_byts = b1
+    return ret_frames
+
+def bytes2frame(byts, channels=1, sample_rate=16000):
+    audio_data = np.frombuffer(byts, np.int16)
+    audio_data = audio_data.reshape((channels, -1))
+    layout = 'mono'
+    if channels == 2:
+        layout = 'stereo'
+    # Create an AV frame from the audio data
+    frame = av.AudioFrame.from_ndarray(audio_data, format='s16', layout='mono')
+    frame.sample_rate = sample_rate
+    return frame
+
+def frame2bytes(frame):
+    audio_array = frame.to_ndarray()
+    dtype = audio_array.dtype
+    audio_bytes = audio_array.tobytes()
+    return audio_bytes
+
+def resample(frame, sample_rate=None):
+    if sample_rate is None:
+        sample_rate = frame.rate
+    r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
+    frame = r.resample(frame)
+    return frame
+
+class MyVad(webrtcvad.Vad):
+    def __init__(self, callback=None):
+        super().__init__(3)
         self.voiced_frames = []
-        self.loop = asyncio.get_event_loop()
-        self.task = None
-        self.debug = True
-        self.running = False
+        self.num_padding_frames = 40
+        self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
+        self.onvoiceend = callback
+        self.triggered = False
+        self.cnt = 0
 
-    def start_vad(self):
-        self.running = True
-        self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+    def voice_duration(self):
+        duration = 0
+        for f in self.voiced_frames:
+            duration = f.samples * 1000 / f.sample_rate + duration
+        return duration
 
-    def _recv(self):
-        asyncio.create_task(self.recv())
-
-    def stop(self):
-        self.running = False
-
-    def frame2bytes(self, frame):
-        audio_array = frame.to_ndarray()
-        dtype = audio_array.dtype
-        audio_bytes = audio_array.tobytes()
-        return audio_bytes
-
-    async def recv(self):
-        frame = await self.track.recv()
-        self.sample_rate = frame.sample_rate
-        duration = (frame.samples * 1000) / frame.sample_rate
-        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
-        try:
-            await self.vad_check(frame)
-        except Exception as e:
-            print(f'{e=}')
-            print_exc()
-            return
-        if self.task:
-            self.task.cancel()
-        if self.running:
-            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
-        return frame
-
-    def resample(self, frame, sample_rate=None):
-        if sample_rate is None:
-            sample_rate = frame.rate
-        r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
-        frame = r.resample(frame)
-        return frame
-
-    async def vad_check(self, inframe):
-        frames = self.resample(inframe)
-        frame = frames[0]
-        is_speech = self.vad.is_speech(self.frame2bytes(frame),
-                self.sample_rate)
+    def vad_check(self, inframe):
+        """
+        Only supports frames with sample_rate == 16000 and samples == 160.
+        """
+        frame = inframe
+        byts = frame2bytes(frame)
+        if self.cnt == 0:
+            f = frame
+            print(f'{f.sample_rate=}, {f.samples=}, {f.layout=}, {len(byts)=}')
+        if not webrtcvad.valid_rate_and_frame_length(frame.sample_rate, frame.samples):
+            print('invalid sample rate / frame length for webrtcvad')
+        is_speech = self.is_speech(byts, frame.sample_rate, length=frame.samples)
         if not self.triggered:
             self.ring_buffer.append((inframe, is_speech))
             num_voiced = len([f for f, speech in self.ring_buffer if speech])
@@ -104,102 +109,73 @@ class AudioTrackVad(MediaStreamTrack):
             if num_unvoiced > 0.9 * self.ring_buffer.maxlen:
                 self.triggered = False
                 duration = self.voice_duration()
-                if duration > 500 and self.onvoiceend:
-                    ret = await self.write_wave()
-                    await self.onvoiceend(ret)
+                if duration > 500:
+                    ret = frames_write_wave(self.voiced_frames)
+                    if self.onvoiceend:
+                        self.onvoiceend(ret)
                 else:
-                    print(f'vad sound {duration=}')
+                    print('-----short voice------')
                 self.ring_buffer.clear()
                 self.voiced_frames = []
+        self.cnt += 1
 
-    def to_mono16000_data(self):
-        lst = []
-        for f in self.voiced_frames:
-            fs = self.resample(f, sample_rate=16000)
-            lst += fs
-        audio_data = b''.join([self.frame2bytes(f) for f in lst])
-        return audio_data
-
-    async def gen_base64(self):
-        audio_data = self.to_mono16000_data()
-        b64 = base64.b64encode(audio_data).decode('utf-8')
-        return b64
-
-    def voice_duration(self):
-        duration = 0
-        for f in self.voiced_frames:
-            duration = f.samples * 1000 / f.sample_rate + duration
-        return duration
-
-    def frames_resample(self, frames, sr=None):
-        fs = []
-        for f in frames:
-            fs1 = self.resample(f, sample_rate=sr)
-            fs += fs1
-        return fs
-
-    def frames_write_wave(self, frames):
-        """
-        fb = [ self.frame2bytes(f) for f in frames ]
-        ndarr = np.frombuffer(b''.join(fb), dtype=np.int16)
-        fn = temp_file(suffix='.wav')
-        write(fn, frames[0].sample_rate, ndarr)
-        return fn
-        """
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le')
-        for frame in frames:
-            for packet in out_stream.encode(frame):
-                output_container.mux(packet)
-        for packet in out_stream.encode(None):
-            output_container.mux(packet)
-        output_container.close()
-        return path
-
-    async def write_wave(self):
-        """Writes a .wav file.
-
-        Takes path, PCM audio data, and sample rate.
-        """
-        """
-        ############
-        # Method:1
-        ############
-        audio_data = self.to_mono16000_data()
-        path = temp_file(suffix='.wav')
-        # print(f'temp_file={path}')
-        with contextlib.closing(wave.open(path, 'wb')) as wf:
-            wf.setnchannels(1)
-            wf.setsampwidth(2)
-            wf.setframerate(16000)
-            wf.writeframes(audio_data)
-        # print('************wrote*******')
-        if self.onvoiceend:
-            await self.onvoiceend(path)
-        # print('************over*******')
-        return
-
-        ############
-        # Method:2
-        ############
-        path = temp_file(suffix='.wav')
-        output_container = av.open(path, 'w')
-        out_stream = output_container.add_stream('pcm_s16le', rate=16000, layout='mono')
-        resampler = AudioResampler(format=out_stream.format, layout=out_stream.layout, rate=out_stream.rate)
-        for frame in self.voiced_frames:
-            for f in resampler.resample(frame):
-                output_container.mux(out_stream.encode(f))
-        output_container.mux(out_stream.encode())
-        output_container.close()
-        return path
-        """
-        f1 = self.frames_write_wave(self.voiced_frames)
-        frames = self.frames_resample(self.voiced_frames, sr=16000)
-        fn = self.frames_write_wave(frames)
-        print(f'source wave filename={f1}, mono 16000 wave filename={fn}')
-        return fn
+class AudioTrackVad(MediaStreamTrack):
+    def __init__(self, track, stage=3, onvoiceend=None):
+        super().__init__()
+        self.track = track
+        self.vad = MyVad(callback=onvoiceend)
+        # self.sample_rate = self.track.getSettings().sampleRate
+        # frameSize = self.track.getSettings().frameSize
+        # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
+        self.frame_duration_ms = 0.02
+        self.remind_byts = b''
+        self.loop = asyncio.get_event_loop()
+        self.task = None
+        self.debug = True
+        self.running = False
+
+    def start_vad(self):
+        self.running = True
+        self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+
+    def _recv(self):
+        asyncio.create_task(self.recv())
+
+    def stop(self):
+        self.running = False
+
+    def to16000_160_frames(self, frame):
+        frames = resample(frame, sample_rate=16000)
+        ret_frames = []
+        for f in frames:
+            if f.samples == 160:
+                return frames
+        for f in frames:
+            b1 = self.remind_byts + frame2bytes(f)
+            while len(b1) >= 320:
+                b = b1[:320]
+                b1 = b1[320:]
+                ret_frames.append(bytes2frame(b))
+            self.remind_byts = b1
+        return ret_frames
+
+    async def recv(self):
+        frame = await self.track.recv()
+        self.sample_rate = frame.sample_rate
+        duration = (frame.samples * 1000) / frame.sample_rate
+        # print(f'{self.__class__.__name__}.recv(): {duration=}, {frame.samples=}, {frame.sample_rate=}')
+        try:
+            frames = self.to16000_160_frames(frame)
+            for frame in frames:
+                self.vad.vad_check(frame)
+        except Exception as e:
+            print(f'{e=}')
+            print_exc()
+            return
+        if self.task:
+            self.task.cancel()
+        if self.running:
+            self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
+        return frame
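Note: after this refactor the VAD state machine lives in MyVad, and AudioTrackVad is a thin asyncio pump that pulls frames from a WebRTC track, normalizes them to 16 kHz / 160-sample frames, and feeds them to the detector. A hedged sketch of how it might be wired into an aiortc peer connection (the on_voice callback and handler body are illustrative, not part of the commit):

from aiortc import RTCPeerConnection
from vad import AudioTrackVad

def on_voice(wav_path):
    # MyVad calls this synchronously with the path of the wave file
    # written for each detected utterance.
    print(f'utterance saved to {wav_path}')

pc = RTCPeerConnection()

@pc.on("track")
def on_track(track):
    if track.kind == "audio":
        vad_track = AudioTrackVad(track, stage=3, onvoiceend=on_voice)
        vad_track.start_vad()   # schedules recv() on the running event loop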