1. Background, goals, impressions:
- The deepface project on GitHub is very well written; I am continuing to study it.
- Using this project, I rewrote its API.
- I added a frontend Flask app.
2. A quick look at the result
3. Process:
I heavily rewrote the API part of the original project,
because the original project's file structure is too sprawling:
I merged that part into a single file, api.py, and deleted everything that could be deleted.
Code 1: api.py
from flask import Flask
from flask_cors import CORS
import argparse
from typing import Union
from flask import Blueprint, request
import numpy as np
import os
import tempfile
import logging
from deepface import DeepFace
from deepface.api.src.modules.core import service
from deepface.commons import image_utils
from deepface.commons.logger import Logger

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = Logger()
blueprint = Blueprint("routes", __name__)


# Helper: convert NumPy types into JSON-serializable values
def convert_numpy(obj):
    if isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, dict):
        return {k: convert_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_numpy(i) for i in obj]
    return obj


def extract_image_from_request(img_key: str) -> Union[str, np.ndarray]:
    """
    Extracts an image from the request either from json or a multipart/form-data file.

    Args:
        img_key (str): The key used to retrieve the image data
            from the request (e.g., 'img').

    Returns:
        img (str or np.ndarray): Given image detail (base64 encoded string, image path or url)
            or the decoded image as a numpy array.
    """
    if request.files:
        logging.info(f"request: {request}")
        logging.info(f"request.files: {request.files}")
        file = request.files.get(img_key)
        logging.info(f"img_key: {img_key}")
        logging.info(f"file: {file}")
        if file is None:
            raise ValueError(f"Request form data doesn't have {img_key}")
        if file.filename == "":
            raise ValueError(f"No file uploaded for '{img_key}'")

        # Get the file extension, defaulting to .jpg
        _, ext = os.path.splitext(file.filename)
        if not ext:
            ext = '.jpg'

        # Save the upload to a temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as temp_file:
            file.save(temp_file.name)
            temp_file_path = temp_file.name
        logging.info(f"Saved temp file: {temp_file_path}, size: {os.path.getsize(temp_file_path)} bytes")

        try:
            if not os.path.exists(temp_file_path):
                raise ValueError(f"Temporary file not found: {temp_file_path}")
            img, _ = image_utils.load_image(temp_file_path)
            if img is None:
                raise ValueError(f"Failed to load image from {temp_file_path}")
            logging.info(f"Loaded image shape: {img.shape if isinstance(img, np.ndarray) else 'not a numpy array'}")
            return img
        finally:
            if os.path.exists(temp_file_path):
                os.unlink(temp_file_path)

    elif request.is_json or request.form:
        logging.info(f"request.json: {request.json}")
        logging.info(f"request.form: {request.form}")
        input_args = request.get_json() or request.form.to_dict()
        if input_args is None:
            raise ValueError("empty input set passed")
        img = input_args.get(img_key)
        if not img:
            raise ValueError(f"'{img_key}' not found in either json or form data request")
        return img

    raise ValueError(f"'{img_key}' not found in request in either json or form data")


@blueprint.route("/")
def home():
    return f"<h1>Welcome to DeepFace API v{DeepFace.__version__}!</h1>"


@blueprint.route("/represent", methods=["POST"])
def represent():
    input_args = (request.is_json and request.get_json()) or (request.form and request.form.to_dict())

    try:
        img = extract_image_from_request("img")
    except Exception as err:
        return {"exception": str(err)}, 400

    obj = service.represent(
        img_path=img,
        model_name=input_args.get("model_name", "VGG-Face"),
        detector_backend=input_args.get("detector_backend", "opencv"),
        enforce_detection=input_args.get("enforce_detection", True),
        align=input_args.get("align", True),
        anti_spoofing=input_args.get("anti_spoofing", False),
        max_faces=input_args.get("max_faces"),
    )
    logger.debug(obj)
    return convert_numpy(obj)  # convert NumPy types


@blueprint.route("/verify", methods=["POST"])
def verify():
    input_args = (request.is_json and request.get_json()) or (request.form and request.form.to_dict())

    try:
        img1 = extract_image_from_request("img1")
    except Exception as err:
        return {"exception": str(err)}, 400

    try:
        img2 = extract_image_from_request("img2")
    except Exception as err:
        return {"exception": str(err)}, 400

    verification = service.verify(
        img1_path=img1,
        img2_path=img2,
        model_name=input_args.get("model_name", "VGG-Face"),
        detector_backend=input_args.get("detector_backend", "opencv"),
        distance_metric=input_args.get("distance_metric", "cosine"),
        align=input_args.get("align", True),
        enforce_detection=input_args.get("enforce_detection", True),
        anti_spoofing=input_args.get("anti_spoofing", False),
    )
    logger.debug(verification)
    return convert_numpy(verification)  # convert NumPy types


@blueprint.route("/analyze", methods=["POST"])
def analyze():
    input_args = (request.is_json and request.get_json()) or (request.form and request.form.to_dict())

    try:
        img = extract_image_from_request("img")
        logging.info(f"type of img received by the api: {type(img)}")
    except Exception as err:
        return {"exception": str(err)}, 400

    actions = input_args.get("actions", ["age", "gender", "emotion", "race"])
    if isinstance(actions, str):
        actions = (
            actions.replace("[", "")
            .replace("]", "")
            .replace("(", "")
            .replace(")", "")
            .replace('"', "")
            .replace("'", "")
            .replace(" ", "")
            .split(",")
        )

    try:
        demographies = service.analyze(
            img_path=img,
            actions=actions,
            detector_backend=input_args.get("detector_backend", "opencv"),
            enforce_detection=input_args.get("enforce_detection", True),
            align=input_args.get("align", True),
            anti_spoofing=input_args.get("anti_spoofing", False),
        )
    except Exception as e:
        return {"error": f"Exception while analyzing: {str(e)}"}, 400

    logger.debug(demographies)
    return convert_numpy(demographies)  # convert NumPy types


def create_app():
    app = Flask(__name__)
    CORS(app)
    app.register_blueprint(blueprint)
    logger.info(f"Welcome to DeepFace API v{DeepFace.__version__}!")
    return app


if __name__ == "__main__":
    deepface_app = create_app()
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", type=int, default=5005, help="Port of serving api")
    args = parser.parse_args()
    deepface_app.run(host="0.0.0.0", port=args.port, debug=True)
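To sanity-check the rewritten API on its own, it can be called directly with requests. This is a minimal sketch of my own (not part of the project), assuming api.py above is already running on port 5005 and that face.jpg is a hypothetical local test image:

# Minimal smoke test for the /analyze endpoint.
# Assumes api.py is running on port 5005 and "face.jpg" is a hypothetical test image.
import requests

API_URL = "http://127.0.0.1:5005/analyze"

with open("face.jpg", "rb") as f:
    files = {"img": ("face.jpg", f, "image/jpeg")}
    data = {
        "actions": '["age", "gender", "emotion", "race"]',
        "detector_backend": "opencv",
    }
    resp = requests.post(API_URL, files=files, data=data)

print(resp.status_code)
print(resp.json())  # already JSON-serializable thanks to convert_numpy on the server side

The same endpoint also accepts a base64 string or an image path in a JSON body, because extract_image_from_request falls back to the json/form branch when no file is attached.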
Code 2: the frontend, app.py
- The project's backend API is written in Flask, so I wrote the frontend in Flask as well.
from flask import Flask, render_template, request, redirect, url_for, flash
from werkzeug.utils import secure_filename
import os
import uuid
import requests
import json
import numpy as np

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'static/uploads'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # limit uploads to 16 MB
app.secret_key = 'your_secret_key'  # used for flash messages

# Address of the DeepFace API
DEEPFACE_API_URL = 'http://127.0.0.1:5005/analyze'

# Allowed image extensions
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}


# Check whether the file extension is allowed
def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


# Make sure the upload folder exists
if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'])


# Helper: convert NumPy data into JSON-serializable values
def convert_numpy(obj):
    if isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, dict):
        return {k: convert_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_numpy(i) for i in obj]
    return obj


@app.route('/')
def index():
    # return render_template('index.html')
    return render_template('home.html')


@app.route('/analyze', methods=['POST'])
def analyze():
    # Handle a file upload
    if 'file' in request.files and request.files['file'].filename:
        file = request.files['file']
        if not allowed_file(file.filename):
            flash('Unsupported file type; only PNG, JPG and JPEG are allowed')
            return redirect(url_for('index'))

        # Save the file (so the frontend can display it)
        filename = str(uuid.uuid4()) + '.' + file.filename.rsplit('.', 1)[1].lower()
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)

        # Rewind the file stream pointer
        file.stream.seek(0)

        # Forward to the DeepFace API
        files = {'img': (filename, file.stream, file.content_type)}
        data = {
            'actions': json.dumps(['age', 'gender', 'emotion', 'race']),
            'detector_backend': 'opencv',
            'enforce_detection': 'true',
            'align': 'true',
            'anti_spoofing': 'false'
        }
        response = requests.post(DEEPFACE_API_URL, files=files, data=data)

    # Handle base64 input (kept for compatibility with the existing frontend)
    elif request.form.get('base64'):
        base64_string = request.form['base64']
        if 'base64,' in base64_string:
            base64_string = base64_string.split('base64,')[1]
        payload = {
            'img': f'data:image/jpeg;base64,{base64_string}',
            'actions': ['age', 'gender', 'emotion', 'race'],
            'detector_backend': 'opencv',
            'enforce_detection': True,
            'align': True,
            'anti_spoofing': False
        }
        headers = {'Content-Type': 'application/json'}
        response = requests.post(DEEPFACE_API_URL, json=payload, headers=headers)

    else:
        flash('Please upload an image file or provide a base64 string')
        return render_template('home.html')

    # Check the response
    if response.status_code == 200:
        results = response.json()
        results = convert_numpy(results)
        flash('Analysis succeeded!')
        print(f"results: {results}")
        return render_template('home.html', results=results, image_url=file_path if 'file' in request.files else None)
    else:
        print("API response:", response.text)
        error_msg = response.json()
        flash(f'API call failed: {error_msg}')
        return render_template('home.html')


if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=8989)
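With the API running on port 5005 and this frontend on port 8989, the whole chain can also be smoke-tested from Python. Again a minimal sketch, assuming face.jpg is a hypothetical local test image:

# End-to-end check: upload a file to the frontend, which forwards it to the DeepFace API.
# Assumes app.py is running on port 8989 and "face.jpg" is a hypothetical test image.
import requests

with open("face.jpg", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8989/analyze",
        files={"file": ("face.jpg", f, "image/jpeg")},
    )

print(resp.status_code)                  # expect 200
print(resp.headers.get("Content-Type"))  # expect text/html, i.e. the rendered home.html

Note that the frontend saves the upload under static/uploads only so that home.html can show the original image next to the results; the stream is rewound with file.stream.seek(0) before being forwarded to the API.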
4. Conclusions, TODO, impressions
- In some places I thought I could write the code myself, but I couldn't; the step was too big. Even with AI, there is still a lot I don't understand.
- This project can only be described as not fully polished, so working with it ran into trouble after trouble.
- When a shot doesn't go in, it isn't entirely the shooter's fault; maybe the teammate's pass was bad, too wide or too low.
I hope this is helpful to everyone.