Prerequisites:

  1. A signaling service
  2. A frontend page for the video call

Demo GitHub address.

Frontend Page

To keep the demo as simple as possible, the page offers just two features: logging in, and calling the other party by their phone number. In real production use the identifier does not have to be a phone number; any string that uniquely identifies a user will do.

The code is as follows:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
<div style="margin: 20px">
    <label for="loginAccount">Login account</label><input id="loginAccount" name="loginAccount" placeholder="Enter your phone number" type="text">
    <button id="login" onclick="login()" type="button">Login</button>
</div>
<div style="margin: 20px">
    <video autoplay controls height="360" id="localVideo" width="640"></video>
    <video autoplay controls height="360" id="remoteVideo" width="640"></video>
</div>
<div style="margin: 20px">
    <label for="toAccount">Callee account</label>
    <input id="toAccount" name="toAccount" placeholder="Enter the other party's phone number" type="text">
    <button id="requestVideo" type="button">Request video call</button>
</div>
<div style="margin: 20px">
    <fieldset>
        <button id="accept" type="button">Accept</button>
        <button id="hangup" type="button">Hang up</button>
    </fieldset>
</div>
<div style="margin: 20px">
    <fieldset>
        <div>
            Recording format: <select disabled id="codecPreferences"></select>
        </div>
        <button id="startRecord" onclick="startRecording()" type="button">Start recording</button>
        <button id="stopRecord" onclick="stopRecording()" type="button">Stop recording</button>
        <button id="downloadRecord" onclick="download()" type="button">Download</button>
    </fieldset>
</div>
<script>
    // ICE server configuration: this TURN server is a public test address, do not use it in production
    let config = {
        iceServers: [
            {
                'urls': 'turn:turn.wildfirechat.cn:3478',
                'credential': 'wfchat',
                'username': 'wfchat'
            }
        ]
    }
    const localVideo = document.getElementById('localVideo');
    const remoteVideo = document.getElementById('remoteVideo');
    const requestVideoButton = document.getElementById('requestVideo');
    const acceptButton = document.getElementById('accept');
    const hangupButton = document.getElementById('hangup');
    const codecPreferences = document.querySelector('#codecPreferences');
    const recordButton = document.getElementById('startRecord');
    const stopRecordButton = document.getElementById('stopRecord');
    const downloadButton = document.getElementById('downloadRecord');
    // WebSocket address of the signaling service
    const wsAddress = 'ws://localhost:9113/ws';
    let loginAttemptCount = 0;
    let myId, toId;
    let pc, localStream, ws;
    let mediaRecorder;
    let recordedBlobs;

    // Connect to the signaling service and register the current account
    function login() {
        loginAttemptCount = 0;
        myId = document.getElementById('loginAccount').value;
        ws = new WebSocket(wsAddress);
        ws.onopen = function () {
            console.log("WebSocket is open now.");
            connect();
            alert("Login successful");
        };
        ws.onmessage = function (message) {
            let msg = JSON.parse(message.data);
            console.log("ws message received: " + msg.type);
            switch (msg.type) {
                case "offline": {
                    // The callee is not online yet: retry the watch request up to 10 times, once per second
                    if (loginAttemptCount < 10) {
                        setTimeout(() => {
                            loginAttemptCount++;
                            watch();
                        }, 1000);
                    }
                    break;
                }
                case "watch": {
                    handleWatch(msg);
                    break;
                }
                case "offer": {
                    handleOffer(msg);
                    break;
                }
                case "answer": {
                    handleAnswer(msg);
                    break;
                }
                case "candidate": {
                    handleCandidate(msg);
                    break;
                }
                case "hangup": {
                    handleHangup(msg);
                    break;
                }
            }
        };
    }

    // Caller: ask the signaling service to reach the entered account, then prepare local media
    requestVideoButton.onclick = async () => {
        toId = document.getElementById('toAccount').value;
        if (!myId) {
            alert('Please log in first');
            return;
        }
        if (!toId) {
            alert("Please enter the other party's phone number");
            return;
        }
        watch();
        localStream = await navigator.mediaDevices.getUserMedia({audio: true, video: true});
        localVideo.srcObject = localStream;
        createPeerConnection();
    }

    function connect() {
        send({
            type: "connect",
            from: myId
        });
    }

    // Callee: remember who is calling
    function handleWatch(msg) {
        toId = msg.from;
    }

    // Callee: accept the call, then create and send the SDP offer
    acceptButton.onclick = async () => {
        localStream = await navigator.mediaDevices.getUserMedia({audio: true, video: true});
        localVideo.srcObject = localStream;
        createPeerConnection();
        pc.createOffer().then(offer => {
            pc.setLocalDescription(offer);
            send({
                type: 'offer',
                from: myId,
                to: toId,
                data: offer
            });
        });
    }

    // Caller: apply the remote offer and reply with an SDP answer
    function handleOffer(msg) {
        pc.setRemoteDescription(msg.data);
        pc.createAnswer().then(answer => {
            pc.setLocalDescription(answer);
            send({
                type: "answer",
                from: myId,
                to: toId,
                data: answer
            });
        });
    }

    function watch() {
        send({
            type: 'watch',
            from: myId,
            to: toId
        });
    }

    function handleAnswer(msg) {
        if (!pc) {
            console.error('no peer connection');
            return;
        }
        pc.setRemoteDescription(msg.data);
    }

    function handleCandidate(msg) {
        if (!pc) {
            console.error('no peer connection');
            return;
        }
        pc.addIceCandidate(new RTCIceCandidate(msg.data)).then(() => {
            console.log('candidate added successfully');
        }).catch(handleError);
    }

    function handleError(error) {
        console.log(error);
    }

    function createPeerConnection() {
        pc = new RTCPeerConnection(config);
        // Forward local ICE candidates to the peer through the signaling service
        pc.onicecandidate = e => {
            if (e.candidate) {
                send({
                    type: "candidate",
                    from: myId,
                    to: toId,
                    data: e.candidate
                });
            }
        };
        pc.ontrack = e => remoteVideo.srcObject = e.streams[0];
        localStream.getTracks().forEach(track => pc.addTrack(track, localStream));
    }

    hangupButton.onclick = async () => {
        if (pc) {
            pc.close();
            pc = null;
        }
        if (localStream) {
            localStream.getTracks().forEach(track => track.stop());
            localStream = null;
        }
        send({
            type: "hangup",
            from: myId,
            to: toId
        });
    }

    function handleHangup() {
        if (!pc) {
            console.error('no peer connection');
            return;
        }
        pc.close();
        pc = null;
        if (localStream) {
            localStream.getTracks().forEach(track => track.stop());
            localStream = null;
        }
        console.log('hangup');
    }

    function send(msg) {
        ws.send(JSON.stringify(msg));
    }

    // Recording: find the MIME types this browser's MediaRecorder supports
    function getSupportedMimeTypes() {
        const possibleTypes = [
            'video/webm;codecs=vp9,opus',
            'video/webm;codecs=vp8,opus',
            'video/webm;codecs=h264,opus',
            'video/mp4;codecs=h264,aac',
        ];
        return possibleTypes.filter(mimeType => {
            return MediaRecorder.isTypeSupported(mimeType);
        });
    }

    // Record the remote video stream
    function startRecording() {
        recordedBlobs = [];
        // Populate the format list only once
        if (!codecPreferences.options.length) {
            getSupportedMimeTypes().forEach(mimeType => {
                const option = document.createElement('option');
                option.value = mimeType;
                option.innerText = option.value;
                codecPreferences.appendChild(option);
            });
        }
        const mimeType = codecPreferences.options[codecPreferences.selectedIndex].value;
        const options = {mimeType};
        try {
            mediaRecorder = new MediaRecorder(remoteVideo.srcObject, options);
        } catch (e) {
            console.error('Exception while creating MediaRecorder:', e);
            alert('Exception while creating MediaRecorder: ' + e);
            return;
        }
        console.log('Created MediaRecorder', mediaRecorder, 'with options', options);
        recordButton.textContent = 'Stop Recording';
        mediaRecorder.onstop = (event) => {
            console.log('Recorder stopped: ', event);
            console.log('Recorded Blobs: ', recordedBlobs);
        };
        mediaRecorder.ondataavailable = handleDataAvailable;
        mediaRecorder.start();
        console.log('MediaRecorder started', mediaRecorder);
    }

    function handleDataAvailable(event) {
        console.log('handleDataAvailable', event);
        if (event.data && event.data.size > 0) {
            recordedBlobs.push(event.data);
        }
    }

    function stopRecording() {
        mediaRecorder.stop();
    }

    // Save the recorded chunks as a webm file
    function download() {
        const blob = new Blob(recordedBlobs, {type: 'video/webm'});
        const url = window.URL.createObjectURL(blob);
        const a = document.createElement('a');
        a.style.display = 'none';
        a.href = url;
        a.download = 'test.webm';
        document.body.appendChild(a);
        a.click();
        setTimeout(() => {
            document.body.removeChild(a);
            window.URL.revokeObjectURL(url);
        }, 100);
    }
</script>
</body>
</html>

Signaling Service

Built on JDK 1.8 with Spring Boot and Netty, it mainly solves two problems:

  1. Identifying the participants, i.e., who is calling and who is answering the video call
  2. Backing the page's buttons, e.g., starting a video call, hanging up, and relaying the messages WebRTC needs to set up its communication channel
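
The page connects to ws://localhost:9113/ws, so the service presumably exposes a Netty WebSocket endpoint on that port and path. The bootstrap is not shown in the original post; below is a minimal sketch under that assumption (class names such as SignalingServer and SignalingHandler are illustrative, and in the real demo this would likely be started from a Spring Boot component rather than a main method):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;

public class SignalingServer {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup boss = new NioEventLoopGroup(1);
        NioEventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                    .group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline()
                              .addLast(new HttpServerCodec())                     // HTTP codec for the WebSocket handshake
                              .addLast(new HttpObjectAggregator(65536))           // aggregate the handshake request
                              .addLast(new WebSocketServerProtocolHandler("/ws")) // upgrade /ws to a WebSocket connection
                              .addLast(new SignalingHandler());                   // the message switch shown below lives here
                        }
                    });
            // 9113 matches the wsAddress used by the page
            bootstrap.bind(9113).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}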

The core of the message handling is as follows:

switch (event.getType()) {
    // Register the client: map the account to its channel context
    case "connect": {
        USER_MAP.put(event.getFrom(), ctx);
        break;
    }
    // Call request: forward to the callee if online, otherwise tell the caller "offline"
    case "watch": {
        WebRtcEvent watchRequest = new WebRtcEvent();
        if (USER_MAP.containsKey(event.getTo())) {
            watchRequest.setType("watch");
            watchRequest.setFrom(event.getFrom());
            watchRequest.setTo(event.getTo());
            USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(watchRequest)));
        } else {
            watchRequest.setType("offline");
            USER_MAP.get(event.getFrom()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(watchRequest)));
        }
        break;
    }
    // Relay the SDP offer to the peer
    case "offer": {
        WebRtcEvent offerRequest = new WebRtcEvent();
        offerRequest.setType("offer");
        offerRequest.setFrom(event.getFrom());
        offerRequest.setTo(event.getTo());
        offerRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(offerRequest)));
        break;
    }
    // Relay the SDP answer to the peer
    case "answer": {
        WebRtcEvent answerRequest = new WebRtcEvent();
        answerRequest.setType("answer");
        answerRequest.setFrom(event.getFrom());
        answerRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(answerRequest)));
        break;
    }
    // Relay ICE candidates to the peer
    case "candidate": {
        WebRtcEvent candidateRequest = new WebRtcEvent();
        candidateRequest.setType("candidate");
        candidateRequest.setFrom(event.getFrom());
        candidateRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(candidateRequest)));
        break;
    }
    // Relay the hang-up notification to the peer
    case "hangup": {
        WebRtcEvent hangupRequest = new WebRtcEvent();
        hangupRequest.setType("hangup");
        hangupRequest.setFrom(event.getFrom());
        hangupRequest.setTo(event.getTo());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(hangupRequest)));
        break;
    }
}
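
The switch relies on a WebRtcEvent message class that the snippet does not show. Its fields can be inferred from the getters and setters used above; a minimal sketch follows (the demo's actual class may differ):

// Signaling message exchanged between the page and the server.
// Field names are inferred from the getters/setters used in the switch above.
public class WebRtcEvent {
    private String type;   // connect / watch / offline / offer / answer / candidate / hangup
    private String from;   // sender's account (the phone number entered at login)
    private String to;     // receiver's account
    private Object data;   // SDP description or ICE candidate, relayed untouched

    public String getType() { return type; }
    public void setType(String type) { this.type = type; }
    public String getFrom() { return from; }
    public void setFrom(String from) { this.from = from; }
    public String getTo() { return to; }
    public void setTo(String to) { this.to = to; }
    public Object getData() { return data; }
    public void setData(Object data) { this.data = data; }
}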

connect -> Login

This corresponds to the Login button on the page: after entering a phone number and clicking Login, the signaling service stores that number in a map, keyed by account, for later lookups.
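
Only the message switch is shown above; the registration itself presumably happens inside the Netty channel handler. A hedged sketch of what that handler might look like, holding the USER_MAP registry and removing an entry when a client's connection closes (the handler name and the cleanup are assumptions, not part of the demo's published code):

import com.alibaba.fastjson.JSONObject;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SignalingHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {

    // Account -> WebSocket channel of that logged-in client; filled by the "connect" case
    public static final Map<String, ChannelHandlerContext> USER_MAP = new ConcurrentHashMap<>();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) {
        // Every frame from the page is a JSON-encoded WebRtcEvent
        WebRtcEvent event = JSONObject.parseObject(frame.text(), WebRtcEvent.class);
        // ... the switch (event.getType()) { ... } from the previous section goes here;
        // its "connect" case does USER_MAP.put(event.getFrom(), ctx)
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        // Assumed housekeeping: drop the mapping when a client disconnects,
        // so a later "watch" for that account gets an "offline" reply
        USER_MAP.values().remove(ctx);
        ctx.fireChannelInactive();
    }
}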

At least two clients need to be logged in before a video call can take place.

watch -> Request a Video Call

After clicking the "Request video call" button, the frontend sends an event to the signaling service with the following structure:

{
    type: 'watch',        // event type
    from: '13789122381',  // my account, e.g. 13789122381
    to: '1323493929'      // the other party's account, e.g. 1323493929
}

The other party's account entered on the page becomes the "to" field.

When the signaling server receives the watch event, it looks up the target client in the map and, if that client is online, forwards the event to it.

offer -> Accept

On the receiver's side, clicking the "Accept" button starts establishing the WebRTC communication channel: the callee creates an SDP offer and sends it back through the signaling service.

The JSON structure of the offer message is as follows:

{
    type: 'offer',
    from: myId,
    to: toId,
    data: offer
}

The complete call flow is: both sides connect and register, the caller sends a watch request, the callee clicks Accept and sends an offer, the caller replies with an answer, and the two sides then exchange ICE candidates until the peer-to-peer media channel is established.

Summary

The HTML page also needs the address of a coturn TURN server. The address used in this demo is a public test address, so please do not use it in production.
