vite+vue3+websocket处理音频流发送到后端

发布于:2025-08-01 ⋅ 阅读:(15) ⋅ 点赞:(0)
<script setup lang="ts">
import { ref, onMounted } from 'vue'
import { useIsMystore } from '@/store/index'
const myStore = useIsMystore()
const emit = defineEmits(['talkMsg'])

// Parse the persisted user-info JSON once (the original parsed it twice).
const storedInfo = JSON.parse(myStore.userInfo)
const meetingInfo = storedInfo.meetingInfo
const userInfo = storedInfo.user

const currentSessionId = ref(null);    // backend session id (not used yet in this view)
const eventSource = ref(null);         // SSE handle (not used yet in this view)
const isLlmEnabled = ref(false);       // LLM feature flag (not used yet in this view)
const lastRecognitionText = ref(null); // latest recognition text pushed by the server
const mediaRecorder = ref(null);       // audio-capture cleanup handle (set by initAudio)
const audioContext = ref(null);        // Web Audio context (set by initAudio)
const websocket = ref(null);           // native WebSocket streaming PCM to the backend
const isRecording = ref(false);        // true while the microphone is being captured
// 初始化音频上下文
// Initialise microphone capture and a Web Audio graph that converts the
// input to 16 kHz / 16-bit mono PCM and streams each frame over the
// WebSocket. Returns the MediaStream; throws with a user-readable message
// on failure. Stores a cleanup handle in `mediaRecorder` and the context
// in the `audioContext` ref.
const initAudio = async () => {
    try {
        // getUserMedia availability check (insecure contexts / old browsers).
        if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
            throw new Error('您的浏览器不支持访问麦克风。请使用最新版本的 Chrome、Firefox 或 Edge 浏览器。');
        }

        // Verify at least one audio input device exists before prompting.
        const devices = await navigator.mediaDevices.enumerateDevices();
        const audioInputs = devices.filter(device => device.kind === 'audioinput');

        if (audioInputs.length === 0) {
            throw new Error('未检测到麦克风设备。请确保您的设备已正确连接麦克风。');
        }

        console.log('可用的音频输入设备:', audioInputs);

        const stream = await navigator.mediaDevices.getUserMedia({
            audio: {
                echoCancellation: true,
                noiseSuppression: true,
                autoGainControl: true
            }
        });

        // BUG FIX: the original declared a local `const audioContext`, which
        // shadowed the component-level `audioContext` ref — the ref stayed
        // null forever. Use a distinct local name and publish it to the ref.
        const ctx = new (window.AudioContext || window.webkitAudioContext)({
            sampleRate: 16000
        });
        audioContext.value = ctx;

        const source = ctx.createMediaStreamSource(stream);

        // NOTE(review): ScriptProcessorNode is deprecated; migrate to
        // AudioWorkletNode when the supported-browser baseline allows it.
        const processor = ctx.createScriptProcessor(4096, 1, 1);

        source.connect(processor);
        processor.connect(ctx.destination);

        // Push each captured buffer down the socket as raw 16-bit PCM.
        processor.onaudioprocess = (e) => {
            if (websocket.value && websocket.value.readyState === WebSocket.OPEN) {
                const inputData = e.inputBuffer.getChannelData(0);

                // Float [-1, 1] samples -> clamped signed 16-bit integers.
                const pcmData = new Int16Array(inputData.length);
                for (let i = 0; i < inputData.length; i++) {
                    pcmData[i] = Math.max(-32768, Math.min(32767, Math.round(inputData[i] * 32768)));
                }

                websocket.value.send(pcmData.buffer);
            }
        };

        // Cleanup handle. Also closes the AudioContext — the original leaked
        // it (contexts are a limited OS resource) — and clears the ref.
        mediaRecorder.value = {
            stream,
            processor,
            source,
            context: ctx,
            stop: () => {
                processor.disconnect();
                source.disconnect();
                stream.getTracks().forEach(track => track.stop());
                ctx.close().catch(() => { /* already closed */ });
                audioContext.value = null;
            }
        };

        return stream;
    } catch (error) {
        console.error('Error accessing microphone:', error);
        // Map the DOMException name to an actionable Chinese message.
        let errorMessage = '无法访问麦克风: ';

        if (error.name === 'NotFoundError') {
            errorMessage += '未找到麦克风设备。请确保您的设备已正确连接麦克风。';
        } else if (error.name === 'NotAllowedError') {
            errorMessage += '麦克风访问被拒绝。请在浏览器设置中允许访问麦克风。';
        } else if (error.name === 'NotReadableError') {
            errorMessage += '麦克风可能被其他应用程序占用。请关闭其他使用麦克风的应用程序后重试。';
        } else {
            errorMessage += error.message || '未知错误';
        }

        // updateStatus(errorMessage, true);
        throw error;
    }
}

// 创建 WebSocket 连接
// Open the speech WebSocket for the current meeting/user and wire up its
// lifecycle handlers. Incoming messages are JSON; their `content` field is
// cached in `lastRecognitionText` and the full payload is re-emitted to the
// parent as a 'talkMsg' event. `sessionId` is currently unused (kept for
// interface compatibility with callers).
const createWebSocket = (sessionId) => {
    // Endpoint shape: ws://host/seminar/speechWebsocket/{meetingId}/{userId}
    // BUG FIX: this line was bare code in the original ("ws://1.1.1.1:…"),
    // which only parsed by accident as a `ws:` label followed by a comment.
    let wsUrl = `${configURl[import.meta.env.MODE].talkSocketUrl}/seminar/speechWebsocket/${meetingInfo.id}/${userInfo.id}`;

    // Native WebSocket (not socket.io).
    websocket.value = new WebSocket(wsUrl);
    websocket.value.onopen = () => {
        console.log('WebSocket connection established');
    };

    websocket.value.onmessage = (event) => {
        // Guard the parse so one malformed frame cannot kill the handler.
        let data;
        try {
            data = JSON.parse(event.data);
        } catch (err) {
            console.error('WebSocket message is not valid JSON:', event.data, err);
            return;
        }
        lastRecognitionText.value = data.content;
        console.log(data)
        emit('talkMsg', data)
    };

    websocket.value.onerror = (error) => {
        console.error('WebSocket error:', error);
    };

    websocket.value.onclose = () => {
        console.log('WebSocket connection closed');
    };
}

// 开始录音
// Start streaming microphone audio to the backend.
//
// BUG FIX: the original additionally constructed a MediaRecorder over the
// same stream, which (1) overwrote the cleanup handle that initAudio()
// stored in `mediaRecorder.value` (leaking the ScriptProcessor graph) and
// (2) sent a second, webm/opus-encoded copy of the audio interleaved with
// the raw PCM frames on the same socket. The raw 16 kHz PCM path built by
// initAudio() is the one that matches the speech endpoint, so the duplicate
// MediaRecorder path is removed.
const startRecording = async () => {
    try {
        // initAudio wires stream -> ScriptProcessor -> websocket and stores
        // { stream, processor, source, context, stop } in mediaRecorder.
        await initAudio();
        isRecording.value = true;
    } catch (error) {
        console.error('Error starting recording:', error);
        // updateStatus('Failed to start recording', true);
    }
}
// 停止录音
// Stop capturing audio and tear down the microphone tracks and the socket.
const stopRecording = () => {
    if (mediaRecorder.value && isRecording.value) {
        mediaRecorder.value.stop();
        // Defensive: stop() on the initAudio handle already releases tracks,
        // but a raw MediaRecorder would not, so release them here too.
        mediaRecorder.value.stream.getTracks().forEach(track => track.stop());
        isRecording.value = false;
    }
    if (websocket.value) {
        // BUG FIX: native WebSocket has close(), not disconnect() — the
        // original threw a TypeError here on every stop (socket.io leftover).
        websocket.value.close();
        websocket.value = null;
    }
}
onMounted(() => {
    // Intentionally empty: recording starts only when the parent calls init().
})

// Open the speech WebSocket, then begin streaming microphone audio.
// Exposed to the parent component via a template ref.
// BUG FIX: the original left startRecording()'s promise floating, so
// microphone failures were unobservable to the caller; init is now async
// and awaits it (backward compatible — callers may still fire-and-forget).
const init = async () => {
    createWebSocket()
    await startRecording()
}
defineExpose({ init })
</script>