我正在尝试把一个运行 FastAPI 服务器的脚本打包成
.exe
文件。运行该 .exe 文件时,cmd 抛出以下错误:
Traceback (most recent call last):
File "importlib\metadata\__init__.py", line 563, in from_name
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "transformers\utils\versions.py", line 102, in require_version
File "importlib\metadata\__init__.py", line 1008, in version
File "importlib\metadata\__init__.py", line 981, in distribution
File "importlib\metadata\__init__.py", line 565, in from_name
importlib.metadata.PackageNotFoundError: No package metadata was found for tqdm
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 4, in <module>
File "PyInstaller\loader\pyimod02_importers.py", line 391, in exec_module
File "transformers\__init__.py", line 26, in <module>
File "PyInstaller\loader\pyimod02_importers.py", line 391, in exec_module
File "transformers\dependency_versions_check.py", line 57, in <module>
File "transformers\utils\versions.py", line 117, in require_version_core
File "transformers\utils\versions.py", line 104, in require_version
importlib.metadata.PackageNotFoundError: No package metadata was found for The 'tqdm>=4.27' distribution was not found and is required by this application.
Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main
我使用虚拟环境 (venv) 来安装脚本所需的包。这些包中包括 PyTorch 和 Transformers,正是它们引发了此错误。主要脚本是:
from fastapi import FastAPI, HTTPException
import uvicorn
from multiprocessing import Process, freeze_support
from transformers import pipeline
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
# Application instance; debug=True enables FastAPI's debug tracebacks.
app = FastAPI(debug=True)
# uvicorn.run(app, host="127.0.0.1", port=8000)
# Allow requests from your React frontend origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests — pin the frontend origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Add your frontend's origin here
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def pick_model(source_language, target_language):
    """Return the local model path for a (source, target) language pair.

    Returns None when the language pair is not supported.
    """
    # Lookup table mapping (source, target) language codes to model paths.
    model_paths = {
        ("de", "fr"): r"path1",
        ("de", "en"): r"path2",
        ("en", "fr"): r"path3",
        ("de", "es"): r"path4",
        ("en", "de"): r"path5",
        ("en", "es"): r"path6",
        ("fr", "de"): r"path7",
    }
    # dict.get returns None for unknown keys, matching the old if/else branch.
    return model_paths.get((source_language, target_language))
# Manual cowboy inference
def translate_manual(input_text, model_path):
    """Translate input_text by driving the tokenizer and model directly.

    NOTE(review): the tokenizer and model are re-loaded from disk on every
    call — confirm this is acceptable for your latency requirements.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
    token_ids = tokenizer(input_text, return_tensors="pt").input_ids
    # Sampling-based decoding: repeated calls may yield different outputs.
    generated = model.generate(
        token_ids, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Inference with pipeline()
def translate_pipeline(input_text, model_path):
    """Translate input_text via the high-level transformers pipeline API."""
    translator = pipeline("translation_XX_to_YY", model=model_path)
    results = translator(input_text, max_length=400)
    # The pipeline returns a list of dicts; keep only the first translation.
    first = results[0]
    return first["translation_text"]
class TextRequestModel(BaseModel):
    """JSON request body shared by both /translate endpoints (camelCase field names match the frontend)."""
    text: str
    sourceLanguage: str  # Include sourceLanguage in the model
    targetLanguage: str  # Include targetLanguage in the model
class CustomHTTPException(HTTPException):
    """HTTPException that defaults to status 500 when no status_code is given."""
    def __init__(self, detail: str, status_code: int = 500):
        # Delegate straight to FastAPI's HTTPException.
        super().__init__(status_code=status_code, detail=detail)
# root API endpoint
@app.get("/")
def read_root():
    """Health-check endpoint for the application root."""
    # The previous try/except was unreachable dead code: a dict literal of
    # constants cannot raise, so the except branch could never execute.
    return {"message": "Hello, This is the root of IAV Translate App!"}
# Define API endpoints
@app.post("/translate/manual")
async def translate_manual_endpoint(request_data: TextRequestModel):
    """Translate text with the manual tokenizer/model flow.

    Raises:
        CustomHTTPException: 400 for an unsupported language pair,
        500 for any unexpected failure during inference.
    """
    try:
        text = request_data.text
        source_language = request_data.sourceLanguage
        target_language = request_data.targetLanguage
        model_path = pick_model(source_language, target_language)
        if model_path is None:
            # Fail fast with a clear client error instead of passing None
            # into AutoTokenizer.from_pretrained, which surfaces as an
            # opaque 500 from deep inside transformers.
            raise CustomHTTPException(
                detail=f"Unsupported language pair: {source_language}->{target_language}",
                status_code=400,
            )
        result = translate_manual(text, model_path)
        return {"translation": result}
    except HTTPException:
        # Let deliberate HTTP errors (like the 400 above) propagate with
        # their original status code instead of re-wrapping them as 500s.
        raise
    except Exception as e:
        raise CustomHTTPException(detail=str(e))
@app.post("/translate/pipeline")
async def translate_pipeline_endpoint(request_data: TextRequestModel):
    """Translate text with the transformers pipeline() flow.

    Raises:
        CustomHTTPException: 400 for an unsupported language pair,
        500 for any unexpected failure during inference.
    """
    try:
        text = request_data.text
        source_language = request_data.sourceLanguage
        target_language = request_data.targetLanguage
        model_path = pick_model(source_language, target_language)
        if model_path is None:
            # Fail fast: pipeline(model=None) would otherwise produce an
            # unrelated 500 deep inside transformers.
            raise CustomHTTPException(
                detail=f"Unsupported language pair: {source_language}->{target_language}",
                status_code=400,
            )
        # Removed leftover debug print() calls from the request path.
        result = translate_pipeline(text, model_path)
        return {"translation": result}
    except HTTPException:
        # Preserve deliberate HTTP errors (e.g. the 400 above) as-is.
        raise
    except Exception as e:
        raise CustomHTTPException(detail=str(e))
if __name__ == "__main__":
    # Required on Windows for multiprocessing inside a frozen (PyInstaller) exe.
    freeze_support()
    # reload=True spawns a watcher subprocess that re-imports "main:app" from
    # main.py on disk; inside a one-file PyInstaller exe that file does not
    # exist, so the reloader breaks the frozen build. Pass the app object
    # directly and leave auto-reload off (use it only during development).
    uvicorn.run(app, host="127.0.0.1", port=8000)
为了构建此脚本,我使用了 pyinstaller,命令为:pyinstaller --onefile --windowed main.py。生成的 main.spec 文件如下(我没有做任何修改,是默认生成的):
# -*- mode: python ; coding: utf-8 -*-
# Fix for "No package metadata was found for tqdm": transformers runs
# require_version("tqdm>=4.27") at import time via importlib.metadata, which
# needs each package's dist-info *metadata*, not just its module code. The
# default spec bundles no dist-info, so copy it in explicitly for transformers
# and the packages its dependency_versions_check verifies.
from PyInstaller.utils.hooks import copy_metadata

metadata_datas = []
for pkg in (
    "transformers",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
):
    metadata_datas += copy_metadata(pkg)

a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=metadata_datas,
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
)
# Bundle the pure-Python modules collected by Analysis.
pyz = PYZ(a.pure)
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='main',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    # NOTE(review): console=False comes from --windowed; for a server app a
    # visible console (console=True) makes startup errors far easier to see.
    console=False,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
由于我使用了虚拟环境,pyinstaller 本应识别所有已安装的包并将它们一并打包进可执行文件,但事实并非如此。我见过一个类似的解决方案(见链接),但其中没有说明具体如何实现,因为 .spec 文件是在构建完成后才生成的。那么,如何强制 pyinstaller 识别这些丢失的包呢?谢谢!!
你找到解决方案了吗?