Implementing camera capture and face detection in Vue, with a face selection box. This article covers the implementation steps, a complete demo, and a second, fuller application; hopefully it can serve as a reference.

Overview

Preface:

The goal is to open the camera and detect faces in the live video stream.

Result: a live video preview with a box drawn around each detected face, plus its x/y coordinates.

Implementation steps:

1. Installation

(1) Download the tracking.js code package from the official site.

(2) Or install it via npm.

Run the command: cnpm install tracking --save (or npm install tracking --save)
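After installing, the demo below loads the library with require(); with a bundler you can equally use side-effect imports. Note that tracking.js attaches a global tracking object rather than exporting one. A minimal sketch, using the same package paths as the demo:

// Side-effect imports: tracking.js registers a global `tracking` object,
// and the data files add the classifiers (e.g. "face") to it.
import "tracking/build/tracking-min.js";
import "tracking/build/data/face-min.js";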

2. Demo code

<template>
    <div class="testTracking">
        <video
                id="video"
                width="1000"
                height="700"
                preload
                autoplay
                loop
                muted
        ></video>
        <canvas id="canvas" width="1000" height="700"></canvas>
        <div class="buttonDiv">
            <button type="button" @click="submit" style="font-size: 3vw;">提取照片</button>
            <button type="button" name="button" @click="checkFace" style="font-size: 3vw;">检测人脸</button>
            <button type="button" name="button" @click="getCompetence" style="font-size: 3vw;">
                打开摄像头
            </button>
            <button type="button" name="button" @click="de" style="font-size: 3vw;">停</button>
        </div>
    </div>
</template>

<script>
    require("tracking/build/tracking-min.js");
    require("tracking/build/data/face-min.js");
    require("tracking/build/data/mouth-min.js");
    require("tracking/examples/assets/stats.min.js");
    export default {
        name: "testTracking",
        data() {
            return {
                videoWidth: 1000,
                videoHeight: 700,
                trackerTask: null,
            };
        },
        methods: {
            checkFace() {
                var video = document.getElementById("video");
                var canvas = document.getElementById("canvas");
                var context = canvas.getContext("2d");

                var tracker = new tracking.ObjectTracker("face");
                tracker.setInitialScale(4);
                tracker.setStepSize(2);
                tracker.setEdgesDensity(0.1);

                this.trackerTask = tracking.track("#video", tracker, { camera: true });

                tracker.on("track", function (event) {
                    if (event.data.length <= 0) {
                        return;
                    }
                    context.clearRect(0, 0, canvas.width, canvas.height);
                    event.data.forEach(function (rect) {
                        context.strokeStyle = '#a64ceb';
                        context.strokeRect(rect.x, rect.y, rect.width, rect.height);
                        context.font = '11px Helvetica';
                        context.fillStyle = "#fff";
                        context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
                        context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
                    });
                });
            },
            submit() {
                let canvas = document.getElementById("canvas");
                let context = canvas.getContext("2d");
                let video = document.getElementById("video");
                // Draw the current video frame onto the canvas
                context.drawImage(video, 0, 0, 1000, 700);
                canvas.toBlob((blob) => {
                    // axios.post({ faceUrl: URL.createObjectURL(blob) }).then((res) => {
                    //   console.log("Upload succeeded");
                    // });
                    var reader = new FileReader();
                    // readAsDataURL is asynchronous; the result is only available once loading ends
                    reader.onloadend = () => {
                        console.log(reader.result);
                    };
                    reader.readAsDataURL(blob);
                });
            },
            getCompetence() {
                var _this = this;
                this.thisCancas = document.getElementById("canvas");
                this.thisContext = this.thisCancas.getContext("2d");
                this.thisVideo = document.getElementById("video");
                // Older browsers may not support mediaDevices at all, so start with an empty object
                if (navigator.mediaDevices === undefined) {
                    navigator.mediaDevices = {};
                }
                // Some browsers implement only part of mediaDevices, so we cannot simply assign
                // an object with getUserMedia, because that would overwrite existing properties.
                // Here, add the getUserMedia property only if it is missing.
                if (navigator.mediaDevices.getUserMedia === undefined) {
                    navigator.mediaDevices.getUserMedia = function (constraints) {
                        // First get the legacy getUserMedia, if it exists
                        var getUserMedia =
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia ||
                            navigator.getUserMedia;
                        // Some browsers do not support it at all;
                        // return a rejected Promise to keep the interface consistent
                        if (!getUserMedia) {
                            return Promise.reject(
                                new Error("getUserMedia is not implemented in this browser")
                            );
                        }
                        // Otherwise, wrap the call to the legacy navigator.getUserMedia in a Promise
                        return new Promise(function (resolve, reject) {
                            getUserMedia.call(navigator, constraints, resolve, reject);
                        });
                    };
                }
                var constraints = {
                    audio: false,
                    video: {
                        width: this.videoWidth,
                        height: this.videoHeight,
                        transform: "scaleX(-1)",
                    },
                };
                navigator.mediaDevices
                    .getUserMedia(constraints)
                    .then(function (stream) {
                        // Older browsers may not have srcObject
                        if ("srcObject" in _this.thisVideo) {
                            _this.thisVideo.srcObject = stream;
                        } else {
                            // Avoid using this in newer browsers, as it is being deprecated.
                            _this.thisVideo.src = window.URL.createObjectURL(stream);
                        }
                        _this.thisVideo.onloadedmetadata = function (e) {
                            _this.thisVideo.play();
                        };
                    })
                    .catch((err) => {
                        console.log(err);
                    });
            },
            de() {
                if (!this.trackerTask) return;
                // Stop face tracking
                this.trackerTask.stop();
                // Close the camera
                this.trackerTask.closeCamera();
            },
        },
        destroyed() {
            if (this.trackerTask) {
                // Stop face tracking
                this.trackerTask.stop();
                // Close the camera
                this.trackerTask.closeCamera();
            }
        },
    };
</script>

<style lang="less" scoped>
    .testTracking {
        height: 100vh;
        width: 100%;
        position: relative;
        > * {
            position: absolute;
            left: 0;
            right: 0;
            margin: auto;
        }
        video,
        canvas {
            top: 0;
        }
        .buttonDiv {
            bottom: 10px;
        }
    }
</style>

*** End of demo ***
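If the trackerTask.closeCamera() call used above is not available in your tracking.js build, the camera can also be released by stopping the MediaStream tracks directly, which is what the second component below does in its closeCamera method. A minimal sketch with the standard MediaStream API:

// Stop every track on the stream attached to the <video> element,
// then detach the stream so the preview stops rendering.
function stopCamera(videoEl) {
    const stream = videoEl.srcObject;
    if (!stream) return;
    stream.getTracks().forEach((track) => track.stop());
    videoEl.srcObject = null;
}

// Example: stopCamera(document.getElementById("video"));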

Reference: https://blog.csdn.net/qingwu1104/article/details/100881658


In addition: the code above is the basic demo. Below is another, fuller application, provided without further explanation; it lists the available cameras in a dropdown, lets you switch between them, and automatically captures a frame every two seconds once a face is detected (unless the src prop is 'user').

<template>
    <div style="width: 100%;height: 100%;">
        <Select v-model="modelSel" style="width:200px;margin-bottom:1%;" @on-change="changeSel">
            <Option v-for="(item,index) in videoArr" :value="item.id" :key="index">{{ item.label }}</Option>
        </Select>
        <div class="testTracking">
            <!-- Open camera -->
            <!--<Button  type="primary"  @click="callCamera" style="margin-right: 10px;">Open camera</Button>-->
            <!--<Button @click = 'changePhoto' style="margin-right: 10px;">Switch camera</Button>-->
            <!-- Close camera -->
            <!--<Button  type="primary"  @click="closeCamera">Close camera</Button>-->
            <!-- Hidden canvas used to capture frames from the stream -->
            <canvas style="display:none;" ref="canvas" :width="videoWidth" :height="videoHeight"></canvas>
            <!-- Video preview and detection overlay -->
            <video ref="video" id="video" :width="videoWidth" :height="videoHeight" autoplay style="display: block;margin:0 auto;"></video>
            <canvas id="canvas"  :width="videoWidth" :height="videoHeight"></canvas>
            <!-- Capture -->
            <!--<Button  type="primary" @click="setImage">Take photo</Button>-->
            <!--<img :src="imgSrc" alt="" class="tx_img">-->
        </div>



    </div>
</template>
<script>
    require("tracking/build/tracking-min.js");
    require("tracking/build/data/face-min.js");
    require("tracking/build/data/mouth-min.js");
    require("tracking/examples/assets/stats.min.js");
    export default {
        props:['src'],
        data () {
            return {
                videoWidth: 900,
                videoHeight: 700,
                videoArr: [], // all available video input devices
                modelSel: '', // deviceId of the currently selected camera
                myInterval: null,
                imgSrc: '',
                isHasFace: false, // no face detected by default
                tracker: null,
            }
        },
        created(){
        },
        mounted(){
            this.callCamera();
            this.changePhoto();
            this.checkFace();
            if(this.src!='user'){
                this.myInterval = setInterval(()=>{
                    this.setImage();
                },2000)
            }
        },
        methods: {
            // Open the camera
            callCamera () {
                // HTML5 API for accessing the computer's camera
                navigator.mediaDevices.getUserMedia({
                    video: true
                }).then(success => {
                    // Camera opened successfully
                    this.$refs['video'].srcObject = success
                    // Start the live preview
                    this.$refs['video'].play()
                }).catch(error => {
                    console.error('Failed to open the camera; please check that it is available!')
                })
            },
            // Take a photo
            setImage () {
                console.log(this.isHasFace);
                if(!this.isHasFace){
                    return false;
                }
                let ctx = this.$refs['canvas'].getContext('2d')
                // Render the current video frame onto the hidden canvas
                ctx.drawImage(this.$refs['video'], 0, 0,  this.videoWidth, this.videoHeight)
                // Convert to base64; toDataURL supports image/jpeg and image/png, and jpeg quality can be compressed
                let imgBase64 = this.$refs['canvas'].toDataURL('image/jpeg');
                this.imgSrc = imgBase64;
                this.$emit('refreshDataList', imgBase64)
                return true;
                /** ------------ Up to here we have the base64 data URL; the commented code below shows size checking and local download ---------- */

                // Convert from bytes to KB to estimate the size
                // let str = imgBase64.replace('data:image/jpeg;base64,', '')
                // let strLength = str.length
                // let fileLength = parseInt(strLength - (strLength / 8) * 2)    // approximate file size, used for validation
                // let size = (fileLength / 1024).toFixed(2)
                // console.log(size)     // upload the captured image to your API here .........
                //
                // // Save the image locally
                // let ADOM = document.createElement('a')
                // ADOM.href = this.imgSrc
                // ADOM.download = new Date().getTime() + '.jpeg'
                // ADOM.click()
            },
            // Close the camera
            closeCamera () {
                if (!this.$refs['video'].srcObject) return
                let stream = this.$refs['video'].srcObject
                let tracks = stream.getTracks()
                tracks.forEach(track => {
                    track.stop()
                })
                this.$refs['video'].srcObject = null
            },


            // Switch between local cameras
            changePhoto(){
                /** Get all media devices */
                navigator.mediaDevices.enumerateDevices()
                    .then((devices)=> {
                        console.log(devices)
                        this.videoArr = [];
                        devices.forEach((device)=> {
                            if(device.kind == 'videoinput'){
                                this.videoArr.push({
                                    'label': device.label,
                                    'id': device.deviceId
                                })
                            }

                        });
                    })
                    .catch(function(err) {
                        console.error(err.name + ": " + err.message);
                    });
            },
            // Handle camera selection from the dropdown
            changeSel(val){
                const videoConstraints = {};
                if (val === '') {
                    videoConstraints.facingMode = 'environment';
                } else {
                    videoConstraints.deviceId = { exact: val };
                }
                var constraints = {
                    video: videoConstraints,
                };
                this.getUserMedia(constraints);

            },

            /** Open the camera with the given constraints */
           getUserMedia(constraints, success, error) {
                    if (navigator.mediaDevices.getUserMedia) {
                        // Modern standard API
                        navigator.mediaDevices.getUserMedia(constraints).then(success=>{
                            // Camera opened successfully
                            this.$refs['video'].srcObject = success
                            // Start the live preview
                            this.$refs['video'].play()
                        }).catch(error || (err => console.error(err)));

                    } else if (navigator.webkitGetUserMedia) {
                        // WebKit-based browsers
                        navigator.webkitGetUserMedia(constraints, success, error)
                    } else if (navigator.mozGetUserMedia) {
                        // Firefox
                        navigator.mozGetUserMedia(constraints, success, error);
                    } else if (navigator.getUserMedia) {
                        // Legacy API
                        navigator.getUserMedia(constraints, success, error);
                    }
                },


            /**
             * Check whether a face is present in the viewfinder
             * */
            checkFace() {
                var video = document.getElementById("video");
                var canvas = document.getElementById("canvas");
                var context = canvas.getContext("2d");

                this.tracker = new tracking.ObjectTracker("face");
                this.tracker.setInitialScale(4);
                this.tracker.setStepSize(2);
                this.tracker.setEdgesDensity(0.1);
                this.trackerTask = tracking.track("#video",  this.tracker, { camera: true });
                let self = this;
                this.tracker.on("track", event=> {
                    if (event.data.length <= 0) {
                        self.setFace(false);
                        return;
                    }
                    self.setFace(true);
                    context.clearRect(0, 0, canvas.width, canvas.height);
                    event.data.forEach(function (rect) {
                        context.strokeStyle = '#a64ceb';
                        context.strokeRect(rect.x, rect.y, rect.width, rect.height);
                        context.font = '11px Helvetica';
                        context.fillStyle = "#fff";
                        context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
                        context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
                    });
                });
            },
            setFace(data){
                this.isHasFace = data;
            }



        },

        beforeDestroy () {
            clearInterval(this.myInterval);
            // Stop face tracking
            this.trackerTask.stop();
        }

    }
</script>
<style lang="less" scoped>
    .testTracking {
        min-height: 700px;
        width: 100%;
        position: relative;
        > * {
            position: absolute;
            left: 0;
            right: 0;
            margin: auto;
        }
        video,
        canvas {
            top: 0;
        }
        .buttonDiv {
            bottom: 10px;
        }
    }
</style>
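For completeness, a parent component could use this capture component roughly as follows. This is only a sketch: the faceCapture name and file path are assumptions, while the src prop and the refreshDataList event come from the code above (auto-capture runs every two seconds whenever src is not 'user' and a face is detected).

<template>
    <face-capture src="check" @refreshDataList="onCapture"></face-capture>
</template>

<script>
    // Assumed file name for the component shown above
    import faceCapture from "./faceCapture.vue";
    export default {
        components: { faceCapture },
        methods: {
            onCapture(imgBase64) {
                // imgBase64 is the captured video frame as a JPEG data URL
                console.log(imgBase64.slice(0, 30) + "...");
            },
        },
    };
</script>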
