我试图使用 HTTP 请求将实时视频流从 JS 传递到 python 进行人脸识别,但出现错误
werkzeug.exceptions.BadRequestKeyError: 400 Bad Request: The browser (or proxy) sent a request that this server could not understand. KeyError: 'video'
在检查终端的应用程序日志后,我看到流的数据类型为
Bytes
,我相信我需要将其转换为 numpy 数组,然后再将其传递给 python 脚本。
我目前的实现如下。我的 JS 代码位于 HTML 模板中,该模板向我的 python 文件发起一个
POST
请求,该文件现在处理实时视频流以获取其中包含面孔的帧。
这是带有 JS 的 HTML
`<button id="startBtn" onclick="openCam()">Open Webcam</button>
<br>
<!-- close button -->
<button id="closeButton" onclick="closeCam()">Close</button>
<br>
<!--video element to show the live video-->
<video id="video" autoplay></video>
<!-- javascript video stream -->
<script>
const video = document.getElementById('video');
const closeButton = document.getElementById('closeButton');
const canvas = document.createElement('canvas');
const context = canvas.getContext('2d');
let intervalID;

// Start the webcam and POST one JPEG frame to /markin ~30 times per second.
function openCam(){
  navigator.mediaDevices.getUserMedia({ video: true })
    .then((stream) => {
      video.srcObject = stream;
      // Size the capture canvas to the real video resolution; otherwise it
      // stays at the 300x150 default and frames are scaled/cropped.
      video.addEventListener('loadedmetadata', () => {
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
      }, { once: true });
      intervalID = setInterval(() => {
        context.drawImage(video, 0, 0, canvas.width, canvas.height);
        const dataURL = canvas.toDataURL('image/jpeg');
        const xhr = new XMLHttpRequest();
        xhr.open('POST', '/markin');
        xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
        xhr.send(`video=${encodeURIComponent(dataURL)}`);
      }, 1000 / 30);
    })
    .catch((error) => {
      console.error(error);
    });
}

// Close the live stream. The original version only *registered* a click
// listener here, so the first click on the Close button did nothing.
// Perform the cleanup directly, and stop EVERY track (not just the first).
function closeCam(){
  clearInterval(intervalID);
  if (video.srcObject) {
    video.srcObject.getTracks().forEach((track) => track.stop());
    video.srcObject = null;
  }
  window.location.href = "{{ url_for('markin') }}";
}
</script>`
这是代码的Python结尾
`# get the video stream from the data request
data = request.form['video']
dataURL = request.form.get('video',False)
if dataURL is not null:
print("DATA IS NOT EMPTY!!!")
# print(dataURL)
print(type(data))
print(data)
else:
print("DATA IS EMPTY!!!")
# Decode Base64-encoded video stream data
decoded_data = base64.b64decode(dataURL.split(',')[1])
img = decoded_data
if img is not null:
print("IMAGE IS NOT EMPTY!!!")
print(type(img))
print(img)`
我也尝试过其他实时视频流方案,例如 WebRTC,但把它配置进我的应用程序相当困难。如果您打算以 WebRTC 作为解决方案,希望能提供一个更直接的示例来说明如何实现,不胜感激。
错误表明在请求的表单数据中找不到键
video
。
以下简单示例基本上基于您的代码。 每隔一段时间从视频中提取单个图像并将其作为文件发送到服务器。这里,面孔被识别并用矩形标记。然后图像被发送回客户端并在那里显示。
from base64 import b64decode, b64encode
from flask import Flask, abort, render_template, request, send_file
from io import BytesIO
import cv2 as cv
import numpy as np
import os
# Load OpenCV's bundled Haar cascade for frontal-face detection.
cascade_file = os.path.join(
    cv.data.haarcascades,
    'haarcascade_frontalface_default.xml'
)
face_classifier = cv.CascadeClassifier(cascade_file)

app = Flask(__name__)
@app.route('/')
def index():
    """Serve the page that hosts the webcam-capture script."""
    page = render_template('index.html')
    return page
@app.post('/process')
def process():
    """Receive one JPEG frame (multipart field 'image'), draw a green
    rectangle around each detected face, and return the annotated JPEG.

    Returns:
        The annotated image as an ``image/jpeg`` response.
        Aborts with 400 when no file was uploaded or it is not a
        decodable image.
    """
    file = request.files.get('image')
    if file:
        # np.fromstring is deprecated (removed for binary input in recent
        # NumPy); frombuffer is the supported way to wrap raw bytes.
        nparr = np.frombuffer(file.read(), np.uint8)
        frame = cv.imdecode(nparr, cv.IMREAD_ANYCOLOR)
        if frame is None:
            # Payload was not a decodable image — reject instead of
            # crashing in cvtColor with a 500.
            abort(400)
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(frame_gray, 1.3, 5)
        for (x, y, w, h) in faces:
            frame = cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        _, buffer = cv.imencode('.jpg', frame)
        return send_file(
            BytesIO(buffer),
            as_attachment=False,
            mimetype='image/jpeg'
        )
    abort(400)
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>Index</title>
  <style>
    main {
      display: flex;
      flex-direction: column;
      align-items: center;
    }
    .controls {
      margin: .64rem 0;
    }
  </style>
</head>
<body>
  <main>
    <div class="controls">
      <button id="toggle-rec-btn">Start</button>
    </div>
    <img id="preview" width="400" height="300" />
  </main>
  <script type="text/javascript">
    (function(uri) {
      const FPS = 6;
      // Off-screen <video> element that receives the webcam stream.
      const createOffVideo = (width = 400, height = 300) => {
        const video = document.createElement('video');
        video.width = width;
        video.height = height;
        return video;
      };
      // Off-screen canvas used to grab single frames from the video.
      const createOffCanvas = (width = 400, height = 300) => {
        const canvas = document.createElement('canvas');
        canvas.width = width;
        canvas.height = height;
        const context = canvas.getContext('2d');
        return [canvas, context];
      };
      const offscreenVideo = createOffVideo();
      const preview = document.getElementById('preview');
      const toggleBtn = document.getElementById('toggle-rec-btn');
      let intervalID, running = false;
      toggleBtn && toggleBtn.addEventListener('click', () => {
        if (running) {
          clearInterval(intervalID);
          offscreenVideo.pause();
          // Also stop the camera tracks so the device (and its LED) is
          // actually released — pause() alone keeps the camera running.
          if (offscreenVideo.srcObject) {
            offscreenVideo.srcObject.getTracks().forEach((track) => track.stop());
            offscreenVideo.srcObject = null;
          }
        } else {
          if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) {
            console.error('getUserMedia not supported.');
            return;
          }
          const [canvas, context] = createOffCanvas();
          navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => {
              offscreenVideo.srcObject = stream;
              offscreenVideo.play();
              intervalID = setInterval(
                () => {
                  context.drawImage(
                    offscreenVideo,
                    0, 0,
                    offscreenVideo.width, offscreenVideo.height
                  );
                  canvas.toBlob(
                    (blob) => {
                      const formData = new FormData();
                      formData.append('image', blob);
                      fetch(uri, {
                        method: 'post',
                        cache: 'no-cache',
                        body: formData
                      })
                        .then((resp) => {
                          // The original `resp.ok && resp.blob()` passed
                          // `false` down the chain on error responses and
                          // URL.createObjectURL(false) then threw.
                          if (!resp.ok) {
                            throw new Error(`Server responded ${resp.status}`);
                          }
                          return resp.blob();
                        })
                        .then((blob) => {
                          // Revoke the previous frame's object URL; without
                          // this one blob is leaked per frame at 6 FPS.
                          const previousURL = preview.src;
                          preview.src = URL.createObjectURL(blob);
                          if (previousURL.startsWith('blob:')) {
                            URL.revokeObjectURL(previousURL);
                          }
                        })
                        .catch((err) => console.error(err));
                    },
                    'image/jpeg',
                    0.95
                  );
                },
                1000 / FPS
              );
            })
            .catch((err) => console.error(err));
        }
        running = !running;
        toggleBtn.innerText = running ? 'Stop' : 'Start';
      });
    })({{ url_for('process') | tojson }});
  </script>
</body>
</html>