// Voice-chat client: streams mic audio (Opus) to a Moshi backend over a
// WebSocket and plays back / transcribes the model's streamed reply.
// Expects globals provided by script tags: React, Recorder (opus-recorder),
// and window["ogg-opus-decoder"] (OggOpusDecoder) — not imported here.
const { useRef, useEffect, useState } = React;

// Returns the WebSocket endpoint of the voice backend (hard-coded deployment URL).
const getBaseURL = () => {
  // This is the permanent WebSocket URL from your deployment.
  // It connects the chat frontend to its voice backend.
  return 'wss://sherrybabe1978--quillman-moshi-web.modal.run/ws';
}

// Root component: owns the socket, recorder, decoder, playback scheduling,
// and the transcript/status UI state.
const App = () => {
  // Logic State (from your original code)
  const [recorder, setRecorder] = useState(null);
  // Single shared AudioContext at 48 kHz for playback.
  // NOTE(review): encoder below sends 24 kHz; playback assumes the decoder's
  // PCM output matches audioContext.sampleRate — presumably 48 kHz, verify.
  const [audioContext] = useState(() => new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 48000 }));
  const decoderRef = useRef(null);          // OggOpusDecoder instance, set by the init effect
  const socketRef = useRef(null);           // live WebSocket, set by the connection effect
  const scheduledEndTimeRef = useRef(0);    // AudioContext time up to which playback is already queued

  // UI State (adapted for the new design)
  const [status, setStatus] = useState('Connecting...');
  const [isMuted, setIsMuted] = useState(true); // Start muted until recorder is ready
  const [completedSentences, setCompletedSentences] = useState([]); // finished transcript sentences
  const [pendingSentence, setPendingSentence] = useState('');       // sentence still being streamed in
  const textContainerRef = useRef(null);    // scroll container for the transcript

  // --- Core Logic (Mostly Unchanged) ---

  // Mic Input: start the Opus recorder.
  // Requests mic access, streams encoded Opus pages straight into the socket,
  // and unmutes. On failure only logs + surfaces an error status.
  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // NOTE(review): `stream` is unused — opus-recorder presumably acquires
      // its own stream internally; confirm the double getUserMedia is intended.
      const rec = new Recorder({
        encoderPath: "https://cdn.jsdelivr.net/npm/opus-recorder@latest/dist/encoderWorker.min.js",
        streamPages: true,            // deliver Ogg pages incrementally, not one blob at the end
        encoderApplication: 2049,     // OPUS_APPLICATION_VOIP
        encoderSampleRate: 24000,
        numberOfChannels: 1,
      });
      // Forward each encoded page to the backend while the socket is open.
      rec.ondataavailable = async (arrayBuffer) => {
        if (socketRef.current?.readyState === WebSocket.OPEN) {
          await socketRef.current.send(arrayBuffer);
        }
      };
      await rec.start();
      setRecorder(rec);
      setIsMuted(false); // Unmute automatically on start
      rec.setRecordingGain(1);
      setStatus('[LISTENING...]');
    } catch (err) {
      console.error("Error starting recording:", err);
      setStatus('[ERROR: MIC FAILED]');
    }
  };

  // Audio Playback: Prep decoder (once on mount; freed on unmount).
  useEffect(() => {
    const initializeDecoder = async () => {
      const decoder = new window["ogg-opus-decoder"].OggOpusDecoder();
      await decoder.ready;
      decoderRef.current = decoder;
    };
    initializeDecoder();
    return () => {
      if (decoderRef.current) decoderRef.current.free();
    };
  }, []);

  // Audio Playback: schedule PCM audio chunks.
  // `newAudioData` is a Float32Array of mono PCM; each chunk is queued to start
  // exactly when the previously scheduled audio ends (gapless playback).
  const scheduleAudioPlayback = (newAudioData) => {
    if (!audioContext || newAudioData.length === 0) return;
    const nowTime = audioContext.currentTime;
    const newBuffer = audioContext.createBuffer(1, newAudioData.length, audioContext.sampleRate);
    newBuffer.copyToChannel(newAudioData, 0);
    const sourceNode = audioContext.createBufferSource();
    sourceNode.buffer = newBuffer;
    sourceNode.connect(audioContext.destination);
    // Start at the tail of the queue, or now if the queue has drained.
    const startTime = Math.max(scheduledEndTimeRef.current, nowTime);
    sourceNode.start(startTime);
    const newEndTime = startTime + newBuffer.duration;
    scheduledEndTimeRef.current = newEndTime;
    sourceNode.onended = () => {
      // If the audio queue is now empty, reset status
      // NOTE(review): `isMuted` here is captured from the render in which this
      // callback was created (the socket effect runs with [] deps), so it can
      // be stale — verify against actual mute behavior.
      if (audioContext.currentTime >= scheduledEndTimeRef.current - 0.1) {
        setStatus(isMuted ? '[MUTED]' : '[LISTENING...]');
      }
    };
  };

  // WebSocket: open connection and set up handlers (once on mount).
  // Wire protocol (from the handler below): first byte is a tag —
  // 1 = Ogg Opus audio payload, 2 = UTF-8 text payload.
  useEffect(() => {
    const endpoint = getBaseURL();
    setStatus('Connecting...');
    const socket = new WebSocket(endpoint);
    socketRef.current = socket;
    socket.onopen = () => {
      console.log("WebSocket connection opened");
      startRecording(); // Automatically start recording
    };
    socket.onmessage = async (event) => {
      const arrayBuffer = await event.data.arrayBuffer();
      const view = new Uint8Array(arrayBuffer);
      const tag = view[0];
      const payload = arrayBuffer.slice(1);
      if (tag === 1 && decoderRef.current) {
        // Audio data
        setStatus('[MODEL IS SPEAKING]');
        const { channelData, samplesDecoded } = await decoderRef.current.decode(new Uint8Array(payload));
        if (samplesDecoded > 0) {
          scheduleAudioPlayback(channelData[0]);
        }
      } else if (tag === 2) {
        // Text data: accumulate into the pending sentence; when it ends with
        // terminal punctuation, promote it to the completed list and reset.
        const text = new TextDecoder().decode(payload);
        setPendingSentence(prev => {
          const updatedPending = prev + text;
          if (/[.!?]$/.test(updatedPending)) {
            setCompletedSentences(prevCompleted => [...prevCompleted, updatedPending.trim()]);
            return '';
          }
          return updatedPending;
        });
      }
    };
    socket.onclose = () => {
      console.log("WebSocket connection closed");
      setStatus('[DISCONNECTED]');
      // NOTE(review): `recorder` is captured from the first render (effect deps
      // are []), so it is presumably still null here and the recorder keeps
      // running after disconnect — confirm and consider a ref instead.
      if (recorder) recorder.stop();
    };
    socket.onerror = (err) => {
      console.error("WebSocket error:", err);
      setStatus('[ERROR: CONNECTION FAILED]');
    };
    return () => {
      socket.close();
    };
  }, []);

  // Effect to auto-scroll the text output whenever the transcript grows.
  useEffect(() => {
    if (textContainerRef.current) {
      textContainerRef.current.scrollTop = textContainerRef.current.scrollHeight;
    }
  }, [completedSentences, pendingSentence]);

  // --- UI Action Handlers ---

  // Toggle the mic by setting recorder gain to 0/1 (keeps the stream alive,
  // so unmuting is instant). No-op until the recorder exists.
  const handleToggleMute = () => {
    if (!recorder) return;
    const nextMutedState = !isMuted;
    recorder.setRecordingGain(nextMutedState ? 0 : 1);
    setIsMuted(nextMutedState);
    // Only change status if the model is not currently speaking
    if (status !== '[MODEL IS SPEAKING]') {
      setStatus(nextMutedState ? '[MUTED]' : '[LISTENING...]');
    }
  };

  // Full transcript: finished sentences plus whatever is still streaming.
  const fullText = [...completedSentences, pendingSentence].join(' ');

  // --- Render Logic (Completely Rewritten for New Design) ---
  // NOTE(review): the JSX markup below was lost in extraction — only its text
  // content survives, and the component's closing lines are missing from this
  // chunk. Restore the element tree from the original file before shipping.
  return (
Powered by Kyutai Moshi on Modal
{/* Text Output Area */}{fullText || (status === 'Connecting...' ? 'Warming up model...' : ' ')}
{status}