diff --git a/rtcllm/aav.py b/rtcllm/aav.py
index d0f623a..893184c 100644
--- a/rtcllm/aav.py
+++ b/rtcllm/aav.py
@@ -5,12 +5,11 @@ from aiortc import MediaStreamTrack, VideoStreamTrack, AudioStreamTrack
 class MyMediaPlayer(MediaPlayer):
     pass
 
-class MyTrackBase(MediaStreamTrack):
+class MyTrackBase:
     def __init__(self, source=None):
-        super().__init__()
         self.source = source
         self.set_source_track()
-        print(f'{self.kind=}, {self.__class__.__name__}')
+        print(f'{self.kind=}, {self.__class__.__name__}, {dir(self)}')
 
     def set_source_track(self):
         if self.kind == 'audio':
@@ -27,7 +26,7 @@ class MyTrackBase(MediaStreamTrack):
         self.set_source_track()
 
     async def recv(self):
-        print(f'{self.__class__.__name__}, {self.source.duration=}, {self.source.time=}')
+        print(f'============{self.__class__.__name__}, {self.source.duration=}, {self.source.time=}')
         if self.source is None:
             return None
         if self.track.readyState != 'live':
@@ -36,8 +35,14 @@ class MyTrackBase(MediaStreamTrack):
         return f
 
 
-class MyAudioStreamTrack(MyTrackBase):
-    kind = 'audio'
+class MyAudioStreamTrack(MyTrackBase, AudioStreamTrack):
+    def __init__(self, source=None):
+        AudioStreamTrack.__init__(self)
+        MyTrackBase.__init__(self, source)
+        print(dir(self), self.__class__.__name__)
 
-class MyVideoStreamTrack(MyTrackBase):
-    kind = 'video'
+class MyVideoStreamTrack(MyTrackBase, VideoStreamTrack):
+    def __init__(self, source=None):
+        VideoStreamTrack.__init__(self)
+        MyTrackBase.__init__(self, source)
+        print(dir(self), self.__class__.__name__)
diff --git a/rtcllm/rtc.py b/rtcllm/rtc.py
index 8d85b38..c69e01c 100644
--- a/rtcllm/rtc.py
+++ b/rtcllm/rtc.py
@@ -41,7 +41,7 @@ async def pc_get_local_candidates(pc, peer):
     for t in its:
         for c in t._connection.local_candidates:
             if c not in peer.l_candidates:
-                print(f'{c=}, {dir(c)}')
+                # print(f'{c=}, {dir(c)}')
                 c.sdpMid = str(peer.sdp_id)
                 peer.sdp_id += 1
                 peer.l_candidates.append(c)
@@ -97,7 +97,7 @@ class RTCLLM:
             'sdpMid':candidate.sdpMid,
             'type': candidate.type
         }
-        print('***********on_icecandidate()', candi)
+        # print('***********on_icecandidate()', candi)
         await self.ws_send(json.dumps({
             "type":"iceCandidate",
             "to":to,
@@ -105,7 +105,7 @@ class RTCLLM:
         }))
 
     async def save_onlineList(self, data):
-        print(f'{self}, {type(self)}')
+        # print(f'{self}, {type(self)}')
         self.onlineList = data.onlineList
 
     async def vad_voiceend(self, peer, audio):
@@ -199,7 +199,7 @@ class RTCLLM:
     async def accept_iceCandidate(self, data):
         pc = self.get_pc(data)
         candidate = data.candidate
-        print('accepted candidate=', candidate)
+        # print('accepted candidate=', candidate)
         """
         rtc_candidate = RTCIceCandidate(
             ip=ip,
@@ -217,7 +217,7 @@ class RTCLLM:
         rtc_candidate.sdpMid = candidate['sdpMid']
         rtc_candidate.sdpMLineIndex = candidate['sdpMLineIndex']
         await pc.addIceCandidate(rtc_candidate)
-        print('addIceCandidate ok')
+        # print('addIceCandidate ok')
 
     handlers = {
         'onlineList':save_onlineList,
diff --git a/rtcllm/vad.py b/rtcllm/vad.py
index 0c94b50..e74178c 100644
--- a/rtcllm/vad.py
+++ b/rtcllm/vad.py
@@ -15,7 +15,6 @@ class AudioTrackVad(MediaStreamTrack):
     def __init__(self, track, stage=3, onvoiceend=None):
         super().__init__()
         self.track = track
-        print(dir(track), 'AudioTrackVad.__init__()')
         self.onvoiceend = onvoiceend
         self.vad = webrtcvad.Vad(stage)
         # self.sample_rate = self.track.getSettings().sampleRate
@@ -56,7 +55,7 @@ class AudioTrackVad(MediaStreamTrack):
         for f in frames:
             if self.debug:
                 self.debug = False
-                print(f'{type(f)}, {f.samples=}, {f.format.bytes=}, {f.sample_rate=}, {f.format=}, {f.is_corrupt=}, {f.layout=}, {f.planes=}, {f.side_data=}')
+                # print(f'{type(f)}, {f.samples=}, {f.format.bytes=}, {f.sample_rate=}, {f.format=}, {f.is_corrupt=}, {f.layout=}, {f.planes=}, {f.side_data=}')
             try:
                 await self.vad_check(f)
             except Exception as e:
@@ -92,7 +91,7 @@ class AudioTrackVad(MediaStreamTrack):
                 for f, s in self.ring_buffer:
                     self.voiced_frames.append(f)
                 self.ring_buffer.clear()
-                print('start voice .....', len(self.voiced_frames))
+                # print('start voice .....', len(self.voiced_frames))
         else:
             # We're in the TRIGGERED state, so collect the audio data
             # and add it to the ring buffer.
@@ -131,7 +130,7 @@ class AudioTrackVad(MediaStreamTrack):
         """
         audio_data = self.to_mono16000_data()
         path = temp_file(suffix='.wav')
-        print(f'temp_file={path}')
+        # print(f'temp_file={path}')
 
         with contextlib.closing(wave.open(path, 'wb')) as wf:
             wf.setnchannels(1)
@@ -139,8 +138,8 @@ class AudioTrackVad(MediaStreamTrack):
             wf.setframerate(16000)
             wf.writeframes(audio_data)
 
-        print('************wrote*******')
+        # print('************wrote*******')
         if self.onvoiceend:
             await self.onvoiceend(path)
-        print('************over*******')
+        # print('************over*******')