Cordova does not allow loading the local model files that face-api.js feeds into TensorFlow.js, but the problem does not occur on iOS or in the browser. How can this be solved?
To load locally the files that are unpacked and used by TensorFlow, you have to tell the face-api.js library which method it should call to read a file, by setting the value in faceapi.env.monkeyPatch.
I can't say this is the best solution, but it is a working one. I handle the Android platform separately from the other platforms (which have no problem), and inside the Android branch I handle JSON files separately from binary files.
Here is a complete example, assuming a 512x512 image containing a face is being loaded:
PS: the JavaScript code below is written for VueJS.
Plugin list
cordova-plugin-device
cordova-plugin-file
App.vue
<script>
import * as faceapi from "face-api.js";
export default {
data: () => ({}),
mounted() {
let cls = this;
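// Wait for Cordova's deviceready event before touching plugin globals (window.device, window.cordova.file)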
document.addEventListener(
"deviceready",
function() {
cls.loadFaceDetectModels();
},
false
);
},
methods: {
async loadFaceDetectModels() {
let MODEL_URL;
if (window.device.platform === "Android") {
MODEL_URL =
window.cordova.file.applicationDirectory + "www/static/models/";
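// Patch the face-api.js environment so loadFromDisk() reads the model files through cordova-plugin-file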
faceapi.env.monkeyPatch({
readFile: filePath =>
new Promise(resolve => {
window.resolveLocalFileSystemURL(
filePath,
function(fileEntry) {
fileEntry.file(
function(file) {
var reader = new FileReader();
let fileExtension = filePath
.split("?")[0]
.split(".")
.pop();
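// Weight manifests (.json) must come back as text, binary weight shards as a Uint8Array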
if (fileExtension === "json") {
reader.onloadend = function() {
resolve(this.result);
};
reader.readAsText(file);
} else {
reader.onloadend = function() {
resolve(new Uint8Array(this.result));
};
reader.readAsArrayBuffer(file);
}
},
function() {
resolve(false);
}
);
},
function() {
resolve(false);
}
);
}),
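// Provide the DOM constructors and element factories face-api.js expects in its environment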
Canvas: HTMLCanvasElement,
Image: HTMLImageElement,
ImageData: ImageData,
Video: HTMLVideoElement,
createCanvasElement: () => document.createElement("canvas"),
createImageElement: () => document.createElement("img")
});
await faceapi.nets.tinyFaceDetector.loadFromDisk(MODEL_URL);
await faceapi.nets.faceRecognitionNet.loadFromDisk(MODEL_URL);
} else {
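// iOS and browser builds can load the models over HTTP as usual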
MODEL_URL = "./static/models";
await faceapi.loadTinyFaceDetectorModel(MODEL_URL);
await faceapi.loadFaceRecognitionModel(MODEL_URL);
}
this.testFaceDetector();
},
testFaceDetector() {
let cls = this;
let baseImage = new Image();
baseImage.src = "./static/img/faceWillSmith.jpg";
baseImage.onload = function() {
faceapi
.detectSingleFace(baseImage, new faceapi.TinyFaceDetectorOptions())
.run()
.then(res => {
alert(JSON.stringify(res));
});
};
}
}
};
</script>
config.xml
<platform name="android">
<allow-intent href="market:*" />
<preference name="loadUrlTimeoutValue" value="700000" />
<preference name="android-minSdkVersion" value="21" />
<preference name="android-targetSdkVersion" value="21" />
<preference name="AndroidPersistentFileLocation" value="Compatibility" />
<preference name="AndroidPersistentFileLocation" value="Internal" />
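<!-- cordova-plugin-file filesystems to expose; "assets" is what lets files bundled under www/ be resolved -->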
<preference name="AndroidExtraFilesystems" value="files,files-external,documents,sdcard,cache,cache-external,assets,root,applicationDirectory" />
</platform>
@luisdemarchi Thanks for your answer, it works very well. In Cordova 12.0.1 I found that local files get a URL beginning with
https://localhost/__cdvfile_assets__/www/<relativePath>
I discovered this by putting a console.log inside the readFile monkey patch. I then added a fetch patch that works correctly, and with it there is no need to change the loadFromUri calls to loadFromDisk. You don't need to change anything inside face-api.js. However, if your weights are stored in a weights directory, change the model-loading calls in your application code to load with "weights/" instead of "/":
await faceapi.loadFaceLandmarkModel('/')
await faceapi.nets.ageGenderNet.load('/')
to
await faceapi.loadFaceLandmarkModel('weights/')
await faceapi.nets.ageGenderNet.load('weights/')
Here is the final code:
document.addEventListener('deviceready', onDeviceReady, false);
async function onDeviceReady() {
let MODEL_URL;
if (window.device.platform === "Android") {
MODEL_URL =
window.cordova.file.applicationDirectory + "/www/weights/";
console.log('model url: ', MODEL_URL);
faceapi.env.monkeyPatch({
readFile: filePath =>
new Promise(resolve => {
window.resolveLocalFileSystemURL(
filePath,
function(fileEntry) {
fileEntry.file(
function(file) {
var reader = new FileReader();
let fileExtension = filePath
.split("?")[0]
.split(".")
.pop();
console.log('filePath: ', filePath, 'ext:', fileExtension, ', file: ', file);
if (fileExtension === "json") {
reader.onloadend = function() {
resolve(this.result);
};
reader.readAsText(file);
} else {
reader.onloadend = function() {
resolve(new Uint8Array(this.result));
};
reader.readAsArrayBuffer(file);
}
},
function() {
resolve(false);
}
);
},
function() {
resolve(false);
}
);
}),
Canvas: HTMLCanvasElement,
CanvasRenderingContext2D: CanvasRenderingContext2D,
Image: HTMLImageElement,
ImageData: ImageData,
Video: HTMLVideoElement,
createCanvasElement: () => document.createElement("canvas"),
createImageElement: () => document.createElement("img"),
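// Rewrite relative model URLs to the https://localhost/__cdvfile_assets__/ scheme that Cordova 12 serves local files from (see note above)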
fetch: function(url, options) {
console.log('fetch: ', url, options);
const modifiedUrl = 'https://localhost/__cdvfile_assets__/www/' + url;
return window['fetch'](modifiedUrl, options);
}
});
//await faceapi.nets.tinyFaceDetector.loadFromDisk(MODEL_URL);
//await faceapi.nets.faceRecognitionNet.loadFromDisk(MODEL_URL);
} else {
MODEL_URL = "./weights";
//await faceapi.loadTinyFaceDetectorModel(MODEL_URL);
//await faceapi.loadFaceRecognitionModel(MODEL_URL);
}
}
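For completeness, here is a minimal sketch (not part of the comment above) of what would follow inside onDeviceReady once the monkey patch is in place: the ordinary URI-based loaders are called with the "weights/" prefix mentioned earlier, and a detection is run exactly as in the browser. The test image path is hypothetical.
// Minimal sketch: load the models through the patched fetch, then run a test detection.
// Assumes the face-api.js weight files ship under www/weights/.
await faceapi.loadTinyFaceDetectorModel('weights/');
await faceapi.loadFaceLandmarkModel('weights/');
const testImage = new Image();
testImage.src = './img/face.jpg'; // hypothetical test image bundled in www/img/
testImage.onload = async () => {
  const result = await faceapi
    .detectSingleFace(testImage, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks();
  console.log(JSON.stringify(result));
};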