model_api.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362
  1. """
  2. 企业级 OCR API 服务
  3. 提供基于 FastAPI 的高并发 OCR 推理服务
  4. """
  5. import asyncio
  6. import base64
  7. import io
  8. import logging
  9. import sys
  10. from contextlib import asynccontextmanager
  11. from typing import Optional, Dict, Any
  12. from datetime import datetime
  13. from fastapi import FastAPI, HTTPException, status
  14. from fastapi.responses import JSONResponse
  15. from pydantic import BaseModel, Field, validator
  16. from PIL import Image
  17. import uvicorn
  18. from model.qwen_ocr import QwenOcr
# ==================== Logging configuration ====================
# Emit to both stdout (container/console logs) and a UTF-8 log file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler('ocr_api.log', encoding='utf-8')
    ]
)
# Module-level logger, named after this module per convention.
logger = logging.getLogger(__name__)
  29. # ==================== 请求/响应模型 ====================
  30. class OCRRequest(BaseModel):
  31. """OCR 推理请求模型"""
  32. image: str = Field(..., description="Base64 编码的图像字符串")
  33. text: str = Field(..., description="OCR 提示词文本")
  34. @validator('image')
  35. def validate_image(cls, v):
  36. """验证 base64 图像格式"""
  37. if not v:
  38. raise ValueError("图像不能为空")
  39. try:
  40. # 尝试解码验证格式
  41. base64.b64decode(v)
  42. except Exception:
  43. raise ValueError("无效的 base64 图像格式")
  44. return v
  45. @validator('text')
  46. def validate_text(cls, v):
  47. """验证提示词文本"""
  48. if not v or not v.strip():
  49. raise ValueError("提示词不能为空")
  50. return v.strip()
class OCRResponse(BaseModel):
    """Envelope returned by the OCR inference endpoint."""
    success: bool = Field(..., description="请求是否成功")
    data: Optional[Any] = Field(None, description="推理结果数据")
    message: str = Field(..., description="响应消息")
    timestamp: str = Field(..., description="响应时间戳")
    request_id: Optional[str] = Field(None, description="请求ID(用于追踪)")
class HealthResponse(BaseModel):
    """Payload returned by the /health endpoint."""
    status: str                # "healthy" or "unhealthy"
    model_loaded: bool         # whether the OCR model is loaded
    timestamp: str             # ISO-8601 response time
    concurrent_requests: int   # requests currently in flight
    max_concurrent: int        # configured concurrency cap
  65. # ==================== 模型管理器(单例模式) ====================
  66. class ModelManager:
  67. """模型管理器 - 单例模式确保全局只有一个模型实例"""
  68. _instance: Optional['ModelManager'] = None
  69. _lock = asyncio.Lock()
  70. def __init__(self):
  71. self.model: Optional[QwenOcr] = None
  72. self.is_loaded: bool = False
  73. self.semaphore: Optional[asyncio.Semaphore] = None
  74. self.max_concurrent_requests: int = 10 # 最大并发请求数
  75. self.current_requests: int = 0
  76. self._request_lock = asyncio.Lock()
  77. @classmethod
  78. async def get_instance(cls) -> 'ModelManager':
  79. """获取单例实例(线程安全)"""
  80. if cls._instance is None:
  81. async with cls._lock:
  82. if cls._instance is None:
  83. cls._instance = cls()
  84. return cls._instance
  85. async def load_model(self, max_concurrent: int = 5):
  86. """
  87. 加载模型
  88. Args:
  89. max_concurrent: 最大并发请求数
  90. """
  91. if self.is_loaded:
  92. logger.warning("模型已经加载,跳过重复加载")
  93. return
  94. try:
  95. logger.info("开始加载 QwenOcr 模型...")
  96. # 在线程池中加载模型,避免阻塞事件循环
  97. loop = asyncio.get_event_loop()
  98. self.model = await loop.run_in_executor(None, QwenOcr)
  99. # 初始化并发控制
  100. self.max_concurrent_requests = max_concurrent
  101. self.semaphore = asyncio.Semaphore(max_concurrent)
  102. self.is_loaded = True
  103. logger.info(f"模型加载成功! 最大并发数: {max_concurrent}")
  104. except Exception as e:
  105. logger.error(f"模型加载失败: {e}", exc_info=True)
  106. raise RuntimeError(f"模型加载失败: {str(e)}")
  107. async def unload_model(self):
  108. """卸载模型并释放资源"""
  109. if not self.is_loaded:
  110. return
  111. try:
  112. logger.info("开始卸载模型...")
  113. # 等待所有正在进行的请求完成
  114. while self.current_requests > 0:
  115. logger.info(f"等待 {self.current_requests} 个请求完成...")
  116. await asyncio.sleep(0.5)
  117. self.model = None
  118. self.semaphore = None
  119. self.is_loaded = False
  120. logger.info("模型卸载成功")
  121. except Exception as e:
  122. logger.error(f"模型卸载失败: {e}", exc_info=True)
  123. def base64_to_pil(self, base64_str: str) -> Image.Image:
  124. """
  125. 将 base64 字符串转换为 PIL Image
  126. Args:
  127. base64_str: base64 编码的图像字符串
  128. Returns:
  129. PIL.Image 对象
  130. """
  131. try:
  132. # 解码 base64
  133. image_data = base64.b64decode(base64_str)
  134. # 转换为 PIL Image
  135. image = Image.open(io.BytesIO(image_data))
  136. # 确保是 RGB 模式
  137. if image.mode != 'RGB':
  138. image = image.convert('RGB')
  139. return image
  140. except Exception as e:
  141. logger.error(f"Base64 转换失败: {e}")
  142. raise ValueError(f"图像解码失败: {str(e)}")
  143. async def inference(self, image_base64: str, prompt: str) -> list:
  144. """
  145. 执行 OCR 推理(带并发控制)
  146. Args:
  147. image_base64: base64 编码的图像
  148. prompt: 提示词
  149. Returns:
  150. 推理结果
  151. """
  152. if not self.is_loaded or self.model is None:
  153. raise RuntimeError("模型未加载")
  154. # 并发控制
  155. async with self.semaphore:
  156. async with self._request_lock:
  157. self.current_requests += 1
  158. try:
  159. # 转换图像
  160. pil_image = self.base64_to_pil(image_base64)
  161. # 在线程池中执行推理,避免阻塞
  162. loop = asyncio.get_event_loop()
  163. result = await loop.run_in_executor(
  164. None,
  165. self.model.inference,
  166. pil_image,
  167. prompt
  168. )
  169. return result
  170. finally:
  171. async with self._request_lock:
  172. self.current_requests -= 1
  173. def get_status(self) -> Dict[str, Any]:
  174. """获取模型状态"""
  175. return {
  176. "is_loaded": self.is_loaded,
  177. "current_requests": self.current_requests,
  178. "max_concurrent": self.max_concurrent_requests
  179. }
  180. # ==================== FastAPI 应用 ====================
  181. @asynccontextmanager
  182. async def lifespan(app: FastAPI):
  183. """应用生命周期管理"""
  184. # 启动时加载模型
  185. logger.info("应用启动中...")
  186. manager = await ModelManager.get_instance()
  187. try:
  188. await manager.load_model(max_concurrent=10)
  189. logger.info("应用启动完成")
  190. except Exception as e:
  191. logger.error(f"应用启动失败: {e}")
  192. raise
  193. yield
  194. # 关闭时卸载模型
  195. logger.info("应用关闭中...")
  196. await manager.unload_model()
  197. logger.info("应用已关闭")
# Create the FastAPI application; `lifespan` ties model load/unload to
# application startup/shutdown.
app = FastAPI(
    title="QwenOCR API",
    description="企业级 OCR 推理服务",
    version="1.0.0",
    lifespan=lifespan
)
  205. # ==================== API 端点 ====================
  206. @app.get("/", response_model=Dict[str, str])
  207. async def root():
  208. """根路径"""
  209. return {
  210. "message": "QwenOCR API Service",
  211. "version": "1.0.0",
  212. "docs": "/docs"
  213. }
  214. @app.get("/health", response_model=HealthResponse)
  215. async def health_check():
  216. """健康检查端点"""
  217. manager = await ModelManager.get_instance()
  218. status_info = manager.get_status()
  219. return HealthResponse(
  220. status="healthy" if status_info["is_loaded"] else "unhealthy",
  221. model_loaded=status_info["is_loaded"],
  222. timestamp=datetime.now().isoformat(),
  223. concurrent_requests=status_info["current_requests"],
  224. max_concurrent=status_info["max_concurrent"]
  225. )
  226. @app.post("/api/v1/ocr", response_model=OCRResponse)
  227. async def ocr_inference(request: OCRRequest):
  228. """
  229. OCR 推理端点
  230. Args:
  231. request: OCRRequest 对象,包含 image(base64) 和 text(提示词)
  232. Returns:
  233. OCRResponse: 推理结果
  234. """
  235. request_id = f"req_{datetime.now().strftime('%Y%m%d%H%M%S%f')}"
  236. logger.info(f"[{request_id}] 收到 OCR 请求")
  237. try:
  238. # 获取模型管理器
  239. manager = await ModelManager.get_instance()
  240. if not manager.is_loaded:
  241. raise HTTPException(
  242. status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
  243. detail="模型未加载,服务暂不可用"
  244. )
  245. # 执行推理
  246. logger.info(f"[{request_id}] 开始推理...")
  247. result = await manager.inference(request.image, request.text)
  248. logger.info(f"[{request_id}] 推理完成")
  249. return OCRResponse(
  250. success=True,
  251. data=result,
  252. message="推理成功",
  253. timestamp=datetime.now().isoformat(),
  254. request_id=request_id
  255. )
  256. except ValueError as e:
  257. # 参数验证错误
  258. logger.warning(f"[{request_id}] 参数验证失败: {e}")
  259. raise HTTPException(
  260. status_code=status.HTTP_400_BAD_REQUEST,
  261. detail=str(e)
  262. )
  263. except RuntimeError as e:
  264. # 模型运行时错误
  265. logger.error(f"[{request_id}] 运行时错误: {e}")
  266. raise HTTPException(
  267. status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
  268. detail=f"推理失败: {str(e)}"
  269. )
  270. except Exception as e:
  271. # 未知错误
  272. logger.error(f"[{request_id}] 未知错误: {e}", exc_info=True)
  273. raise HTTPException(
  274. status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
  275. detail=f"服务器内部错误: {str(e)}"
  276. )
  277. @app.exception_handler(Exception)
  278. async def global_exception_handler(request, exc):
  279. """全局异常处理器"""
  280. logger.error(f"全局异常捕获: {exc}", exc_info=True)
  281. return JSONResponse(
  282. status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
  283. content={
  284. "success": False,
  285. "data": None,
  286. "message": f"服务器错误: {str(exc)}",
  287. "timestamp": datetime.now().isoformat()
  288. }
  289. )
# ==================== Entry point ====================
def main():
    """Start the uvicorn server hosting this application."""
    uvicorn.run(
        "model.model_api:app",
        host="0.0.0.0",
        port=8000,
        workers=1,  # single worker: the model's memory footprint is large
        log_level="info",
        access_log=True,
        reload=False  # hot-reload disabled for production
    )
if __name__ == "__main__":
    main()