bugfix

parent c225d69aba
commit 096c2c4d8e
@@ -5,6 +5,7 @@ from functools import partial
 
 from appPublic.dictObject import DictObject
 from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription, RTCIceCandidate
+from aiortc.sdp import candidate_from_sdp, candidate_to_sdp
 from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder, MediaRelay
 
 # from websockets.asyncio.client import connect
@@ -29,6 +30,9 @@ class RTCLLM:
         self.dc = None
         self.loop = asyncio.get_event_loop()
 
+    async def ws_send(self, s):
+        self.ws.send(s)
+
     def get_pc(self, data):
         return self.peers[data['from'].id].pc
 
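
The new ws_send() helper centralizes the signalling-socket write, but self.ws.send(s) is called without await. If self.ws is a websockets client connection (the commented-out websockets import above suggests that) or an aiohttp WebSocket, send() is a coroutine and does nothing until awaited. A minimal sketch under that assumption, for illustration only:

    # Sketch only, assuming self.ws.send() is a coroutine (websockets/aiohttp);
    # not the committed code.
    async def ws_send(self, s):
        await self.ws.send(s)
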
@@ -47,15 +51,15 @@ class RTCLLM:
         self.ws.close()
 
     async def login(self):
-        await self.ws.send(json.dumps({
+        await self.ws_send(json.dumps({
             'type':'login',
             'info':self.info
         }))
 
     async def on_icecandidate(self, pc, candidate):
-        print('on_icecandidate()', self, pc, candidate)
+        print('******************** on_icecandidate()', self, pc, candidate)
         if candidate:
-            await self.ws.send(json.dumps({
+            await self.ws_send(json.dumps({
                 "type":"candidate",
                 "candidate":candidate,
             }))
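
For reference, an aiortc RTCIceCandidate object is not JSON-serializable as-is; the candidate_to_sdp helper imported above is the usual way to turn it into the browser-style dict before it goes through json.dumps. A hedged sketch, not the committed code:

    # Illustration: converting an aiortc RTCIceCandidate into the
    # browser-style JSON shape used by most signalling servers.
    from aiortc.sdp import candidate_to_sdp

    def candidate_to_json(candidate):
        return {
            'candidate': 'candidate:' + candidate_to_sdp(candidate),
            'sdpMid': candidate.sdpMid,
            'sdpMLineIndex': candidate.sdpMLineIndex,
        }
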
@@ -76,14 +80,14 @@ class RTCLLM:
             'info':data['from'],
             'pc':pc
         })
-        await self.ws.send(json.dumps({'type':'callAccepted', 'to':data['from']}))
+        await self.ws_send(json.dumps({'type':'callAccepted', 'to':data['from']}))
 
     async def pc_track(self, peerid, track):
         peer = self.peers[peerid]
         pc = peer.pc
         if track.kind == 'audio':
             f = partial(self.vad_voiceend, peer)
-            vadtrack = AudioTrackVad(track, stage=3, onvoiceend=f)
+            vadtrack = AudioTrackVad(track, stage=0, onvoiceend=f)
             peer.vadtrack = vadtrack
             vadtrack.start_vad()
 
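
The call site now passes stage=0 instead of stage=3. Assuming stage is forwarded to py-webrtcvad's aggressiveness mode (an assumption, the AudioTrackVad constructor is not shown here), 0 is the least aggressive setting and classifies more audio as speech, while 3 is the strictest. A self-contained webrtcvad sketch under that assumption:

    # Assumes 'stage' maps to webrtcvad's aggressiveness mode (0..3).
    import webrtcvad

    vad = webrtcvad.Vad(0)                     # 0 = least aggressive
    sample_rate = 16000
    frame_ms = 20
    pcm = b'\x00\x00' * (sample_rate * frame_ms // 1000)   # 20 ms of s16 mono silence
    print(vad.is_speech(pcm, sample_rate))     # silence is normally reported as non-speech
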
@@ -98,6 +102,7 @@ class RTCLLM:
     async def pc_connectionState_changed(self, peerid):
         peer = self.peers[peerid]
         pc = peer.pc
+        print('conn_state={pc.connectionState} ...........')
         peer.audiotrack = MyAudioTrack()
         peer.videotrack = MyVideoTrack()
         pc.addTrack(peer.audiotrack)
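
A side note on the added debug line: as a plain string it prints the braces literally. Inside the method, where pc is already bound, an f-string would interpolate the actual state:

    print(f'conn_state={pc.connectionState} ...........')
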
@@ -115,8 +120,6 @@ class RTCLLM:
             k:v for k,v in self.peers.items() if k != peerid
         }
         self.peers = peers
-        if len([k for k in self.peers.keys()]) == 0:
-            await self.ws.close()
 
     def play_video(self, peerid):
         print('play video ........................')
@@ -146,11 +149,19 @@ class RTCLLM:
         await pc.setRemoteDescription(offer)
         answer = await pc.createAnswer()
         await pc.setLocalDescription(answer)
-        await self.ws.send(json.dumps({
+        await self.ws_send(json.dumps({
             'type':'answer',
             'answer':{'type':pc.localDescription.type, 'sdp':pc.localDescription.sdp},
             'to':data['from']
         }))
 
+        offer = await pc.createOffer()
+        await pc.setLocalDescription(offer)
+        await self.ws_send(json.dumps({
+            'type':'offer',
+            'offer': {'type':pc.localDescription.type, 'sdp':pc.localDescription.dsp},
+            'to':data['from']
+        }))
+
     async def accept_answer(self, data):
         pc = self.get_pc(data)
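
The added block mirrors the answer path to start a renegotiation offer. For illustration, the same step as it is normally written against aiortc, whose RTCSessionDescription exposes the SDP text as .sdp (a sketch, not the committed code):

    # Renegotiation sketch inside the same handler (pc, data and self already bound).
    offer = await pc.createOffer()
    await pc.setLocalDescription(offer)
    await self.ws_send(json.dumps({
        'type': 'offer',
        'offer': {'type': pc.localDescription.type, 'sdp': pc.localDescription.sdp},
        'to': data['from']
    }))
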
@@ -167,6 +178,7 @@ class RTCLLM:
         foundation = candidate['candidate'].split(' ')[0]
         component = candidate['candidate'].split(' ')[1]
         type = candidate['candidate'].split(' ')[7]
+        """
         rtc_candidate = RTCIceCandidate(
             ip=ip,
             port=port,
@@ -178,6 +190,10 @@ class RTCLLM:
             sdpMid=candidate['sdpMid'],
             sdpMLineIndex=candidate['sdpMLineIndex']
         )
+        """
+        rtc_candidate = candidate_from_sdp(candidate['candidate'].split(":", 1)[1])
+        rtc_candidate.sdpMid = candidate['sdpMid']
+        rtc_candidate.sdpMLineIndex = candidate['sdpMLineIndex']
         await pc.addIceCandidate(rtc_candidate)
         print('addIceCandidate ok')
 
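
candidate_from_sdp expects the candidate line without its leading "candidate:" prefix, which is why the code splits on the first colon. A small self-contained illustration with a made-up candidate string:

    # Stand-alone example; the candidate string is made up.
    from aiortc.sdp import candidate_from_sdp

    raw = 'candidate:1 1 UDP 2122252543 192.168.1.10 54321 typ host'
    cand = candidate_from_sdp(raw.split(':', 1)[1])
    cand.sdpMid = '0'
    cand.sdpMLineIndex = 0
    print(cand.type, cand.ip, cand.port)       # host 192.168.1.10 54321
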
@@ -22,7 +22,7 @@ class AudioTrackVad(MediaStreamTrack):
         # frameSize = self.track.getSettings().frameSize
         # self.frame_duration_ms = (1000 * frameSize) / self.sample_rate
         self.frame_duration_ms = 0.00002
-        self.num_padding_frames = 10
+        self.num_padding_frames = 20
         self.ring_buffer = collections.deque(maxlen=self.num_padding_frames)
         self.triggered = False
         self.voiced_frames = []
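
The padding buffer follows the usual py-webrtcvad collector pattern: keep the last N (frame, is_speech) pairs, start collecting once most of the window is voiced, and stop once most of it is unvoiced (the detrigger half appears in the last hunk below). Raising num_padding_frames from 10 to 20 widens that decision window. A sketch of the triggering half under that reading, with illustrative names:

    import collections

    def feed(ring_buffer, voiced_frames, triggered, frame, is_speech):
        # Padded-collector step: returns the new 'triggered' state.
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([1 for _, speech in ring_buffer if speech])
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                voiced_frames.extend(f for f, _ in ring_buffer)
                ring_buffer.clear()
        else:
            voiced_frames.append(frame)
        return triggered

    ring_buffer = collections.deque(maxlen=20)  # mirrors num_padding_frames = 20
    voiced_frames, triggered = [], False
    triggered = feed(ring_buffer, voiced_frames, triggered, b'\x00' * 640, False)
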
@@ -51,12 +51,12 @@ class AudioTrackVad(MediaStreamTrack):
 
     async def recv(self):
         oldf = await self.track.recv()
-        self.sample_rate = oldf.sample_rate
         frames = self.resample(oldf)
         for f in frames:
             if self.debug:
                 self.debug = False
                 print(f'{type(f)}, {f.samples=}, {f.format.bytes=}, {f.sample_rate=}, {f.format=}, {f.is_corrupt=}, {f.layout=}, {f.planes=}, {f.side_data=}')
+            self.sample_rate = f.sample_rate
             try:
                 await self.vad_check(f)
             except Exception as e:
@@ -69,10 +69,10 @@ class AudioTrackVad(MediaStreamTrack):
         self.task = self.loop.call_later(self.frame_duration_ms, self._recv)
         return f
 
-    def resample(self, frame):
-        fmt = AudioFormat('s16')
-        al = AudioLayout(1)
-        r = AudioResampler(format=fmt, layout=al, rate=frame.rate)
+    def resample(self, frame, sample_rate=None):
+        if sample_rate is None:
+            sample_rate = frame.rate
+        r = AudioResampler(format='s16', layout='mono', rate=sample_rate)
         frame = r.resample(frame)
         return frame
 
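
The rewritten resample() lets callers pick a target rate and passes the format and layout as plain strings, which PyAV accepts. In recent PyAV releases AudioResampler.resample() returns a list of frames, which is why recv() above iterates over the result. A stand-alone sketch, assuming PyAV is installed:

    # Stand-alone PyAV sketch of the same conversion: stereo 48 kHz -> mono 16 kHz s16.
    from av import AudioFrame
    from av.audio.resampler import AudioResampler

    src = AudioFrame(format='s16', layout='stereo', samples=960)
    for p in src.planes:
        p.update(bytes(p.buffer_size))          # fill with silence
    src.sample_rate = 48000

    r = AudioResampler(format='s16', layout='mono', rate=16000)
    out = r.resample(src)                       # list of frames on PyAV >= 9
    print([(f.sample_rate, f.samples) for f in out])
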
@@ -104,14 +104,19 @@ class AudioTrackVad(MediaStreamTrack):
             # audio we've collected.
             if num_unvoiced > 0.9 * self.ring_buffer.maxlen:
                 self.triggered = False
-                audio_data = b''.join([self.frame2bytes(f) for f in self.voiced_frames])
+                # audio_data = b''.join([self.frame2bytes(f) for f in self.voiced_frames])
                 # await self.write_wave(audio_data)
-                await self.gen_base64(audio_data)
+                await self.gen_base64()
                 self.ring_buffer.clear()
                 self.voiced_frames = []
 
         print('end voice .....', len(self.voiced_frames))
-    async def gen_base64(self, audio_data):
+    async def gen_base64(self):
+        lst = []
+        for f in self.voiced_frames:
+            fs = self.resample(f, sample_rate=16000)
+            lst += fs
+        audio_data = b''.join([self.frame2bytes(f) for f in lst])
         b64 = base64.b64encode(audio_data).decode('utf-8')
         if self.onvoiceend:
             await self.onvoiceend(b64)
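
gen_base64() now resamples the collected frames to 16 kHz mono s16 before joining and base64-encoding them, so the payload handed to onvoiceend is base64 of raw PCM. A sketch of how a consumer could wrap that payload in a WAV container, assuming those parameters:

    # Consumer-side sketch: assumes 16 kHz, mono, 16-bit PCM as produced above.
    import base64
    import wave

    def save_voice(b64_payload, path='voice.wav'):
        pcm = base64.b64decode(b64_payload)
        with wave.open(path, 'wb') as w:
            w.setnchannels(1)
            w.setsampwidth(2)          # s16 -> 2 bytes per sample
            w.setframerate(16000)
            w.writeframes(pcm)
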