Before diving in, here is the blog post this article borrows from; thanks to those giants. On recording: https://blog.csdn.net/sweetsuzyhyf/article/details/50469881

 
Preamble: I never expected my first blog post to be about the frontend. As a junior backend developer there is plenty I still get wrong on the frontend side, so if you spot any problems with this article, corrections are welcome. Thanks, everyone. Enough chatter, on to the requirements.
Requirements: the company wants web-based recording that is uploaded to a Java backend in real time over WebSocket and can then be played back live through VLC. Put simply: I talk into a web page on one end, and a loudspeaker broadcasts my voice in real time on the other. The audio must be MP3, and there should be a waveform display so you can see that you are actually speaking. This post covers the first half (recording, encoding, and upload); the VLC playback half is not implemented here. If you want to know about the VLC side, leave a comment and I can point you in the right direction.
 
Frontend implementation:
Include the script: <script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
This js differs slightly from the original author's: I added a few things to it, and it pulls in two other scripts, lame.min.js and worker-realtime.js, both of which are in the original author's code (see the worker sketch below for reference).
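For orientation, recordmp3.js talks to worker-realtime.js with three commands ('init', 'encode', 'finish') and expects 'init', 'end', and 'error' messages back. If you do not have the original worker handy, the following is only a minimal sketch of what it needs to do; it assumes a lamejs-style encoder API (Mp3Encoder / encodeBuffer / flush), so the actual lame.min.js from the referenced blog may expose a somewhat different interface.

// worker-realtime.js — minimal sketch, NOT the original worker.
// Assumes lame.min.js exposes a lamejs-style Mp3Encoder; adjust to your build.
importScripts('lame.min.js');

var encoder = null;
var mp3Chunks = []; // encoded MP3 chunks accumulated since the last 'finish'

onmessage = function (e) {
    switch (e.data.cmd) {
        case 'init':
            // one channel; sample rate and bit rate come from the main thread
            encoder = new lamejs.Mp3Encoder(1, e.data.config.sampleRate, e.data.config.bitRate);
            postMessage({cmd: 'init'});
            break;
        case 'encode':
            // convert Float32 PCM samples in [-1, 1] to 16-bit signed integers
            var samples = e.data.buf;
            var int16 = new Int16Array(samples.length);
            for (var i = 0; i < samples.length; i++) {
                var s = Math.max(-1, Math.min(1, samples[i]));
                int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
            }
            var mp3buf = encoder.encodeBuffer(int16);
            if (mp3buf.length > 0) mp3Chunks.push(mp3buf);
            break;
        case 'finish':
            var rest = encoder.flush();
            if (rest.length > 0) mp3Chunks.push(rest);
            // recordmp3.js wraps this array in a Blob: new Blob(e.data.buf, {type: 'audio/mp3'})
            postMessage({cmd: 'end', buf: mp3Chunks});
            mp3Chunks = [];
            break;
        default:
            postMessage({cmd: 'error', error: 'unknown command: ' + e.data.cmd});
    }
};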
Page:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=gb2312"/>
    <title>Test</title>
</head>
<body>
<button id="intercomBegin">开始对讲</button>
<button id="intercomEnd">关闭对讲</button>
<canvas id="casvased" style="width: 400px;height: 100px"></canvas>
</body>
<script type="text/javascript" src="/js/jquery-3.3.1.js"></script>
<script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
<script type="text/javascript">
    var begin = document.getElementById('intercomBegin');
    var end = document.getElementById('intercomEnd');

    var canvas = document.getElementById("casvased");
    var canvasCtx = canvas.getContext("2d");

    var ws = null; // WebSocket connection

    var recorder;

    /*
    * WebSocket
    */
    function useWebSocket() {
        ws = new WebSocket("ws://127.0.0.1:8089/send/voice");
        ws.binaryType = 'arraybuffer'; // binary frames are sent as ArrayBuffer
        ws.onopen = function () {
            console.log('WebSocket handshake succeeded');
            if (ws.readyState == 1) { // connection is open, start recording
                recorder.start();
            }
        };

        ws.onmessage = function (msg) {
            console.info(msg)
        }

        ws.onerror = function (err) {
            console.info(err)
        }
    }

    /*
    * Start intercom
    */
    begin.onclick = function () {
        recorder = new MP3Recorder({
            debug: true,
            funOk: function () {
                console.log('Record button clicked, recording started!');
            },
            funCancel: function (msg) {
                console.log(msg);
                recorder = null;
            }
        });
    }

    /*
    * Stop intercom
    */
    end.onclick = function () {
        if (ws) {
            ws.close();
            recorder.stop();
            console.log('Intercom and WebSocket closed');
        }
    }

    var sendData = function () { // split the encoded MP3 data into 1024-byte packets and send them
        var reader = new FileReader();
        reader.onload = e => {
            var outbuffer = e.target.result;
            var arr = new Int8Array(outbuffer);
            if (arr.length > 0) {
                var tmparr = new Int8Array(1024);
                var j = 0;
                for (var i = 0; i < arr.byteLength; i++) {
                    tmparr[j++] = arr[i];
                    if (((i + 1) % 1024) == 0) {
                        ws.send(tmparr);
                        if (arr.byteLength - i - 1 >= 1024) {
                            tmparr = new Int8Array(1024);
                        } else {
                            tmparr = new Int8Array(arr.byteLength - i - 1);
                        }
                        j = 0;
                    }
                    if ((i + 1 == arr.byteLength) && ((i + 1) % 1024) != 0) {
                        ws.send(tmparr);
                    }
                }
            }
        };
        recorder.getMp3Blob(function (blob) {
            reader.readAsArrayBuffer(blob); // read the MP3 blob into the FileReader as an ArrayBuffer
        });
    };
</script>
</html>

recordmp3.js

(function (exports) {

    var MP3Recorder = function (config) {

        var recorder = this;
        config = config || {};
        config.sampleRate = config.sampleRate || 44100;
        config.bitRate = config.bitRate || 128;

        navigator.getUserMedia = navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.msGetUserMedia;

        if (navigator.getUserMedia) {
            navigator.getUserMedia({
                    audio: true
                },
                function (stream) {
                    var context = new AudioContext(),
                        microphone = context.createMediaStreamSource(stream),
                        processor = context.createScriptProcessor(16384, 1, 1), // buffer size, number of input channels, number of output channels
                        mp3ReceiveSuccess, currentErrorCallback;

                    var height = 100;
                    var width = 400;

                    const analyser = context.createAnalyser()
                    analyser.fftSize = 1024
                    // connect the analyser to the audio source
                    microphone.connect(analyser);
                    analyser.connect(context.destination);

                    const bufferLength = analyser.frequencyBinCount // half of the analyser's fftSize
                    const dataArray = new Uint8Array(bufferLength);

                    function draw() {
                        canvasCtx.clearRect(0, 0, width, height); // clear the canvas
                        analyser.getByteFrequencyData(dataArray); // copy the current frequency data into the Uint8Array
                        (window.requestAnimationFrame || window.webkitRequestAnimationFrame)(draw);
                        canvasCtx.fillStyle = '#000130';
                        canvasCtx.fillRect(0, 0, width, height);
                        let barWidth = (width / bufferLength) * 2;
                        let barHeight;
                        let x = 0;
                        let c = 2
                        for (let i = 0; i < bufferLength; i++) {
                            barHeight = c+(dataArray[i]/400)*height;
                            canvasCtx.fillStyle = 'rgb(0, 255, 30)';
                            canvasCtx.fillRect(x, height / 2 - barHeight / 2, barWidth, barHeight);
                            x += barWidth + 1;
                        }
                    }

                    draw();

                    useWebSocket();
                    config.sampleRate = context.sampleRate;
                    processor.onaudioprocess = function (event) {
                        // encode while recording
                        var array = event.inputBuffer.getChannelData(0);
                        realTimeWorker.postMessage({cmd: 'encode', buf: array});
                        sendData();
                    };

                    var realTimeWorker = new Worker('/js/recorder/worker-realtime.js');
                    realTimeWorker.onmessage = function (e) {
                        switch (e.data.cmd) {
                            case 'init':
                                log('Encoder initialized');
                                if (config.funOk) {
                                    config.funOk();
                                }
                                break;
                            case 'end':
                                log('MP3 size: ' + e.data.buf.length);
                                if (mp3ReceiveSuccess) {
                                    mp3ReceiveSuccess(new Blob(e.data.buf, {type: 'audio/mp3'}));
                                }
                                break;
                            case 'error':
                                log('Error: ' + e.data.error);
                                if (currentErrorCallback) {
                                    currentErrorCallback(e.data.error);
                                }
                                break;
                            default:
                                log('Unknown message: ' + JSON.stringify(e.data));
                        }
                    };

                    recorder.getMp3Blob = function (onSuccess, onError) {
                        currentErrorCallback = onError;
                        mp3ReceiveSuccess = onSuccess;
                        realTimeWorker.postMessage({cmd: 'finish'});
                    };

                    recorder.start = function () {
                        if (processor && microphone) {
                            microphone.connect(processor);
                            processor.connect(context.destination);
                            log('Recording started');
                        }
                    }

                    recorder.stop = function () {
                        if (processor && microphone) {
                            microphone.disconnect();
                            processor.disconnect();
                            log('Recording stopped');
                        }
                    }

                    realTimeWorker.postMessage({
                        cmd: 'init',
                        config: {
                            sampleRate: config.sampleRate,
                            bitRate: config.bitRate
                        }
                    });
                },
                function (error) {
                    var msg;
                    switch (error.code || error.name) {
                        case 'PERMISSION_DENIED':
                        case 'PermissionDeniedError':
                            msg = 'The user denied microphone access';
                            break;
                        case 'NOT_SUPPORTED_ERROR':
                        case 'NotSupportedError':
                            msg = 'The browser does not support the microphone';
                            break;
                        case 'MANDATORY_UNSATISFIED_ERROR':
                        case 'MandatoryUnsatisfiedError':
                            msg = 'No microphone device found';
                            break;
                        default:
                            msg = 'Unable to open the microphone, error: ' + (error.code || error.name);
                            break;
                    }
                    if (config.funCancel) {
                        config.funCancel(msg);
                    }
                });
        } else {
            if (config.funCancel) {
                config.funCancel('This browser does not support recording');
            }
        }

        function log(str) {
            if (config.debug) {
                console.log(str);
            }
        }
    }

    exports.MP3Recorder = MP3Recorder;

})(window);
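One caveat: navigator.getUserMedia is deprecated. Current browsers expose the Promise-based navigator.mediaDevices.getUserMedia instead, and they only show the microphone prompt on localhost or over HTTPS. Below is a minimal sketch of the modern entry point; the analyser/processor/worker wiring from recordmp3.js above stays the same.

// Sketch of the Promise-based replacement for navigator.getUserMedia.
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function (stream) {
        var context = new (window.AudioContext || window.webkitAudioContext)();
        var microphone = context.createMediaStreamSource(stream);
        // ... connect the analyser, processor and worker exactly as above ...
    })
    .catch(function (err) {
        console.log('Unable to open the microphone: ' + err.name);
    });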

Backend WebSocket:

Here the received audio data is saved as an MP3 file.

package com.jetosend.common.socket;

import com.jetosend.common.utils.Utils;
import org.springframework.stereotype.Component;

import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Hashtable;
import java.util.Map;

@ServerEndpoint("/send/{key}")
@Component
public class ServerSocket {

    private static final Map<String, Session> connections = new Hashtable<>();
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();

    /***
     * @Description: open the connection
     * @Param: [id - the resource code of the remote platform,
     * session]
     * @Return: void
     * @Author: Liting
     * @Date: 2019-10-10 09:22
     */
    @OnOpen
    public void onOpen(@PathParam("key") String id, Session session) {
        System.out.println(id + "连上了");
        connections.put(id, session);
    }

    /**
     * Receive messages
     */
    @OnMessage
    public void onMessage(@PathParam("key") String id, InputStream inputStream) {
        System.out.println("来自" + id);
        try {
            int rc = 0;
            byte[] buff = new byte[100];
            while ((rc = inputStream.read(buff, 0, 100)) > 0) {
                byteArrayOutputStream.write(buff, 0, rc);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Error handling
     *
     * @param throwable
     */
    @OnError
    public void onError(Throwable throwable) {
        throwable.printStackTrace();
        //TODO log the exception properly
    }

    /**
     * Close the connection
     */
    @OnClose
    public void onClose(@PathParam("key") String id) {
        System.out.println(id + "断开");
        BufferedOutputStream bos = null;
        FileOutputStream fos = null;
        File file = null;
        try {
            file = new File("D:\testtest.mp3");

            //输出流
            fos = new FileOutputStream(file);

            //缓冲流
            bos = new BufferedOutputStream(fos);

            //将字节数组写出
            bos.write(byteArrayOutputStream.toByteArray());
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (bos != null) {
                try {
                    bos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            if (fos != null) {
                try {
                    fos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

        connections.remove(id);
    }
}

Result:

 

That's everything. If you have any questions, feel free to leave a comment.

Source: 博客园 (cnblogs)

Original post: https://www.cnblogs.com/duojiao/p/13086616.html
