Ver código fonte

将报告生成功能添加到后台服务中

Sherlock 11 meses atrás
pai
commit
79970d27ad
8 arquivos alterados com 30 adições e 314 exclusões
  1. 7 3
      api.py
  2. 3 3
      api_test.py
  3. 0 159
      gnerate_report.py
  4. 1 5
      models/rank/gbdt_lr_inference.py
  5. 2 2
      report.py
  6. 2 2
      utils/__init__.py
  7. 15 0
      utils/reports_process.py
  8. 0 140
      utils/result_process.py

+ 7 - 3
api.py

@@ -6,6 +6,7 @@ from models import Recommend
 import os
 from pydantic import BaseModel
 import uvicorn
+from utils import ReportUtils
 
 app = FastAPI()
 dao = MySqlDao()
@@ -59,12 +60,15 @@ def recommend(request: RecommendRequest):
                 "delivery_count": data["delivery_count"]
             }
         )
-        
+    
+    generate_report(request.city_uuid, request.product_code, request.recall_cust_count, request.delivery_count)
+    
     return {"code": 200, "msg": "success", "data": {"recommendationInfo": request_data}}
 
 def generate_report(city_uuid, product_id, recall_count, delivery_count):
     """生成报告"""
-    
+    report_util = ReportUtils(city_uuid, product_id)
+    report_util.generate_all_data(recall_count, delivery_count)
 
 if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=7960)

+ 3 - 3
api_test.py

@@ -1,11 +1,11 @@
 import requests
 import json
 
-url = "http://172.25.208.151:7960/brandcultivation/api/v1/recommend"
+url = "http://127.0.0.1:7960/brandcultivation/api/v1/recommend"
 payload = {
     "city_uuid": "00000000000000000000000011445301",
-    "product_code": "350139",
-    "recall_cust_count": 1000,
+    "product_code": "440298",
+    "recall_cust_count": 500,
     "delivery_count": 5000
 }
 headers = {'Content-Type': 'application/json'}

+ 0 - 159
gnerate_report.py

@@ -1,159 +0,0 @@
-
-from database import RedisDatabaseHelper, MySqlDao
-from models.item2vec import Item2VecModel
-from models.rank.data.config import CustConfig, ProductConfig, ShopConfig, OrderConfig
-from models.rank.data.utils import sample_data_clear
-from models.rank.gbdt_lr_inference import GbdtLrModel, generate_feats_map
-from utils.result_process import get_cust_list_from_history_order, split_relation_subtable, generate_report
-import pandas as pd
-
-redis = RedisDatabaseHelper().redis
-dao = MySqlDao()
-gbdtlr_model = GbdtLrModel("./models/rank/weights/00000000000000000000000011445301/gbdtlr_model.pkl")
-item2vec = Item2VecModel("00000000000000000000000011445301")
-
-def get_itemcf_recall(city_uuid, product_id):
-    """协同召回"""
-    key = f"fc:{city_uuid}:{product_id}"
-    recall_list = redis.zrevrange(key, 0, -1, withscores=False)
-    return recall_list
-
-def get_hot_recall(city_uuid):
-    """热度召回"""
-    key = f"hot:{city_uuid}:sale_qty"
-    recall_list = redis.zrevrange(key, 0, -1, withscores=False)
-    return recall_list
-
-def get_recall_cust(city_uuid, product_id, recall_count):
-    """根据协同过滤和热度召回召回商户
-    """
-    itemcf_recall_list = get_itemcf_recall(city_uuid, product_id)
-    hot_recall_list = get_hot_recall(city_uuid)
-    
-    result = list(dict.fromkeys(itemcf_recall_list))
-    
-    # 如果结果不足,从hot_recall中补齐
-    if len(result) < recall_count:
-        hot_recall_set = set(hot_recall_list) - set(result)
-        additional_items = [item for item in hot_recall_list if item in hot_recall_set]
-        needed = recall_count - len(result)
-        result.extend(additional_items[:needed])
-    return result[:recall_count]
-
-def generate_recommend_sample(city_uuid, product_id):
-    """生成预测数据集"""
-    product_in_order = dao.get_product_from_order(city_uuid)["product_code"].unique().tolist()
-    if product_id in product_in_order:
-        recall_count = 1000
-        cust_list = get_recall_cust(city_uuid, product_id, recall_count)
-    else:
-        cust_list = item2vec.get_recommend_cust_list(product_id)["cust_code"].to_list()
-    
-    
-    # 获取卷烟的信息
-    product_data = dao.get_product_by_id(city_uuid, product_id)[ProductConfig.FEATURE_COLUMNS]
-    filter_dict = product_data.to_dict("records")[0]
-    
-    cust_data = dao.get_cust_by_ids(city_uuid, cust_list)[CustConfig.FEATURE_COLUMNS]
-    shop_data = dao.get_shop_by_ids(city_uuid, cust_list)[ShopConfig.FEATURE_COLUMNS]
-    
-    product_data = sample_data_clear(product_data, ProductConfig)
-    cust_data = sample_data_clear(cust_data, CustConfig)
-    shop_data = sample_data_clear(shop_data, ShopConfig)
-    
-    cust_feats = shop_data.set_index("cust_code")
-    cust_data = cust_data.join(cust_feats, on="BB_RETAIL_CUSTOMER_CODE", how="inner")
-    
-    feats_map = generate_feats_map(product_data, cust_data)
-    
-    return feats_map, filter_dict, cust_list
-
-def get_recommend_list_by_gbdt_lr(city_uuid, product_id):
-    """根据gbdt-lr进行打分并获得推荐列表,适用于推荐历史订单中存在的卷烟"""
-    feats_sample, _, cust_list = generate_recommend_sample(city_uuid, product_id)
-    recommend_list = gbdtlr_model.get_recommend_list(feats_sample, cust_list)
-    return recommend_list
-    
-
-def generate_features_shap(city_uuid, product_id, delivery_count):
-    feats_sample, filter_dict, cust_list = generate_recommend_sample(city_uuid, product_id)
-    
-    if product_id in dao.get_product_from_order(city_uuid)["product_code"].unique().tolist():
-        # 如果推荐商品为新卷烟,走iterm2vec
-        recommend_data = gbdtlr_model.get_recommend_list(feats_sample, cust_list)
-    else:
-        recommend_data = item2vec.get_recommend_cust_list(product_id).to_dict("records")
-    result = gbdtlr_model.generate_shap_interance(feats_sample)
-    generate_report(city_uuid, result, filter_dict, recommend_data, delivery_count, "./data")
-
-def eval(city_uuid, product_code):
-    """推荐效果验证"""
-    eval_report = get_cust_list_from_history_order(city_uuid, product_code)
-    eval_report.to_csv("./data/效果验证表.csv", index=False)
-    
-def generate_similarity_product(product_code):
-    product_similarity_map = item2vec.generate_product_similarity_map(product_code)
-    product_similarity_map = product_similarity_map[["product_name", "similarity", "brand_name", "factory_name", "is_low_tar", "is_medium", "is_tiny", "is_coarse", "is_exploding_beads", "is_abnormity", "is_cig", "is_chuangxin", "direct_retail_price", "tbc_total_length", "product_style"]]
-    product_similarity_map = product_similarity_map.rename(
-        columns={
-            "product_name": "卷烟名称",
-            "similarity": "相似度",
-            "factory_name": "生产厂商",
-            "brand_name": "品牌名称",
-            "is_low_tar":                "低焦油卷烟",
-            "is_medium":                 "中支烟",
-            "is_tiny":                   "细支烟",
-            "is_coarse":                 "粗支烟",
-            "is_exploding_beads":        "爆珠烟",
-            "is_abnormity":              "异形包装",
-            "is_cig":                    "雪茄烟",
-            "is_chuangxin":              "创新品类",
-            "direct_retail_price":       "卷烟建议零售价",
-            "tbc_total_length":          "烟支总长度",
-            "product_style":             "包装类型",
-        }
-    )
-    product_similarity_map.to_excel("./data/相似卷烟表.xlsx", index=False)
-
-def generate_delivery_strategy():
-    
-    pass
-
-def run():
-    pass
-
-if __name__ == '__main__':
-    generate_features_shap("00000000000000000000000011445301", "350139", delivery_count=5000)
-    generate_similarity_product("350139")
-    eval("00000000000000000000000011445301", "350355")
-    
-    # recommend_list = get_recommend_list_by_gbdt_lr("00000000000000000000000011445301", "350139")
-    # recommend_list = pd.DataFrame(recommend_list)
-    # recommend_list.to_csv("./data/recommend_list.csv", index=False, encoding="utf-8-sig")
-    
-    # 拿龙军数据
-    # data = dao.get_order_by_cust("00000000000000000000000011445301", "445323105795")
-    # data = data.groupby(["cust_code", "product_code", "product_name"], as_index=False)["sale_qty"].sum()
-    # data.to_csv("./data/cust.csv", index=False)
-    
-    # city_uuid = "00000000000000000000000011445301"
-    # order_data = dao.get_order_by_cust("00000000000000000000000011445301", "445323105795")
-    # order_data["sale_qty"] = order_data["sale_qty"].fillna(0)
-    # order_data = order_data.infer_objects(copy=False)
-    # order_data = order_data.groupby(["cust_code", "product_code", "product_name"], as_index=False)["sale_qty"].sum()
-    
-    # cust_data = dao.load_cust_data(city_uuid)[CustConfig.FEATURE_COLUMNS]
-    # sample_data_clear(cust_data, CustConfig)
-    # shop_data = dao.load_shopping_data(city_uuid)[ShopConfig.FEATURE_COLUMNS]
-    # sample_data_clear(shop_data, ShopConfig)
-    # cust_ids = shop_data.set_index("cust_code")
-    # cust_data = cust_data.join(cust_ids, on="BB_RETAIL_CUSTOMER_CODE", how="inner")
-    
-    # product_data = dao.load_product_data(city_uuid)[ProductConfig.FEATURE_COLUMNS]
-    # sample_data_clear(product_data, ProductConfig)
-    
-    # order_data = order_data.merge(product_data, on="product_code", how="inner")
-    # order_data = order_data.merge(cust_data, left_on='cust_code', right_on='BB_RETAIL_CUSTOMER_CODE', how="inner")
-    
-    # result = gbdtlr_model.inference_from_sample(order_data)
-    # result.to_csv("./data/junlong.csv", index=False)

+ 1 - 5
models/rank/gbdt_lr_inference.py

@@ -10,7 +10,6 @@ import pandas as pd
 from sklearn.preprocessing import StandardScaler
 import shap
 from tqdm import tqdm
-from utils import split_relation_subtable
 import os
 
 def generate_feats_map(product_data, cust_data):
@@ -246,8 +245,5 @@ if __name__ == "__main__":
     data = data["data"].sample(n=300, replace=True, random_state=42)
     data.to_csv("./data/data.csv", index=False)
     # data = data["data"]
-    result = gbdt_sort.generate_shap_interance(data)
-    print("保存结果")
-    result.to_csv("./data/feats_interaction.csv", index=False, encoding='utf-8-sig')
-    split_relation_subtable(result, "./data")
+    
     

+ 2 - 2
report.py

@@ -1,6 +1,6 @@
 import os
 import argparse
-from utils.report_utils import ReportUtils
+from utils import ReportUtils
     
     
 def run():
@@ -8,7 +8,7 @@ def run():
     
     parser.add_argument("--city_uuid", type=str, default="00000000000000000000000011445301")
     parser.add_argument("--product_id", type=str, default="350139")
-    parser.add_argument("--recall_count", type=int, default=1000)
+    parser.add_argument("--recall_count", type=int, default=500)
     parser.add_argument("--delivery_count", type=int, default=5000)
     
     # parser.add_argument()

+ 2 - 2
utils/__init__.py

@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-
-from utils.result_process import split_relation_subtable
+from utils.report_utils import ReportUtils
 
 __all__ = [
-    "split_relation_subtable"
+    "ReportUtils"
 ]

+ 15 - 0
utils/reports_process.py

@@ -1,4 +1,5 @@
 from models.rank.data.config import ImportanceFeaturesMap
+import os
 import pandas as pd
 
 
@@ -100,4 +101,18 @@ def eval_report_process(eval_order_data, recommend_data):
         }
     )
     return merge_data
+
+def split_relation_subtable(data, filter_dict, save_dir):
+    """拆分卷烟商户特征相关性子表"""
+    data = filter_data(data, filter_dict).copy()
+    data.to_csv(os.path.join(save_dir, "feats_interaction.csv"), index=False, encoding='utf-8-sig')
+    data['group_key'] = data["product_feat"].str.extract(r'^([^(]+)')
+    grouped = data.groupby('group_key')
+    sub_tables = {
+        name: group.drop(columns=['group_key']).sort_values('relation', ascending=False)
+        for name, group in grouped
+    }
+    
+    for name, sub_data in sub_tables.items():
+        sub_data.to_csv(os.path.join(save_dir, f"{name}.csv"), index=False, encoding='utf-8-sig')
     

+ 0 - 140
utils/result_process.py

@@ -1,140 +0,0 @@
-import os
-import pandas as pd
-from database import MySqlDao
-from models.rank.data.config import ImportanceFeaturesMap, ProductConfig
-
-dao = MySqlDao()
-def filter_data(data, filter_dict):
-    product_content = []
-    for key, value in filter_dict.items():
-        if key != 'product_code':
-            product_content.append(f"{ImportanceFeaturesMap.PRODUCT_FEATRUES_MAP[key]}({value})")
-    
-    data = data[data['product_feat'].isin(product_content)]
-    return data
-
-def split_relation_subtable(data, filter_dict, save_dir):
-    """拆分卷烟商户特征相关性子表"""
-    data = filter_data(data, filter_dict).copy()
-    data.to_csv(os.path.join(save_dir, "feats_interaction.csv"), index=False, encoding='utf-8-sig')
-    data['group_key'] = data["product_feat"].str.extract(r'^([^(]+)')
-    grouped = data.groupby('group_key')
-    sub_tables = {
-        name: group.drop(columns=['group_key']).sort_values('relation', ascending=False)
-        for name, group in grouped
-    }
-    
-    for name, sub_data in sub_tables.items():
-        sub_data.to_csv(os.path.join(save_dir, f"{name}.csv"), index=False, encoding='utf-8-sig')
-        
-def generate_report(city_uuid, data, filter_dict, recommend_data, delivery_count, save_dir):
-    """根据总表筛选结果"""
-    # 1. 筛选商户相关性排序结果
-    data = filter_data(data, filter_dict).copy()
-    data.to_csv(os.path.join(save_dir, "feats_interaction.csv"), index=False, encoding='utf-8-sig')
-    group_sums = data.groupby("cust_feat")["relation"].sum()
-    # 筛选出总和非负的cust_feat
-    valid_cust_feats = group_sums[group_sums > 0].index.tolist()
-    cust_relation = data[data["cust_feat"].isin(valid_cust_feats)]
-    cust_relation = cust_relation.reset_index(drop=True)
-    cust_relation = cust_relation.rename(
-        columns = {
-            "product_feat": "卷烟特征",
-            "cust_feat": "商户特征",
-            "relation": "相关性"
-        }
-    )
-    cust_relation.to_csv(os.path.join(save_dir, "品规商户特征关系表.csv"), index=False, encoding='utf-8-sig')
-    
-    
-    # 2. 品规信息
-    cust_relation[:20].to_csv(os.path.join(save_dir, "cust_relation.csv"), index=False, encoding='utf-8-sig')
-    with open(os.path.join(save_dir, "卷烟信息表.csv"), "w", encoding='utf-8-sig') as f:
-        for key, value in filter_dict.items():
-            if key != 'product_code':
-                f.write(f"{ImportanceFeaturesMap.PRODUCT_FEATRUES_MAP[key]}, {value}\n")
-                
-    # 3. 生成推荐报告
-    recommend_report = generate_recommend_report(city_uuid, recommend_data, delivery_count)
-    recommend_report.to_csv(os.path.join(save_dir, "商户售卖推荐表.csv"), index=False, encoding="utf-8-sig")
-    
-def generate_recommend_report(city_uuid, recommend_data, delivery_count):
-    recommend_data = pd.DataFrame(recommend_data)
-    
-    recommend_list = recommend_data["cust_code"].to_list()
-    recommend_cust_info = dao.get_cust_by_ids(city_uuid, recommend_list)
-    
-    cust_ids = recommend_cust_info.set_index("BB_RETAIL_CUSTOMER_CODE")
-    recommend_data = recommend_data.join(cust_ids, on="cust_code", how="inner")
-    recommend_data = recommend_data[["cust_code", "BB_RETAIL_CUSTOMER_NAME", "sale_qty", "recommend_score"]]
-   # 1. 计算每个商户的理论应得数量(带小数)
-    recommend_data["delivery_float"] = (
-        recommend_data["recommend_score"] / recommend_data["recommend_score"].sum() * delivery_count
-    )
-
-    # 2. 向下取整得到基础配额
-    recommend_data["delivery_count"] = recommend_data["delivery_float"].astype(int)
-
-    # 3. 计算余数并排序
-    recommend_data["remainder"] = recommend_data["delivery_float"] - recommend_data["delivery_count"]
-    recommend_data = recommend_data.sort_values("remainder", ascending=False)
-
-    # 4. 将剩余配额按余数从大到小分配
-    remaining = delivery_count - recommend_data["delivery_count"].sum()
-    recommend_data.iloc[:remaining, recommend_data.columns.get_loc("delivery_count")] += 1
-    
-    recommend_data = recommend_data.drop(columns=["delivery_float", "remainder"])
-    recommend_data = recommend_data.reset_index()
-    # 5. 按recommend_score从大到小重新排序
-    recommend_data = recommend_data.sort_values("index")
-    recommend_data["sale_qty"] = recommend_data["sale_qty"].round(0).astype(int) # 将月均销量四舍五入取整
-    recommend_data = recommend_data.rename(
-        columns={
-            "index": "推荐序号", 
-            "cust_code": "商户编号", 
-            "BB_RETAIL_CUSTOMER_NAME": "商户名称", 
-            "sale_qty": "历史月均销量", 
-            "recommend_score": "推荐系数", 
-            "delivery_count": "建议投放量(条)"
-            }
-        )
-    recommend_data["推荐序号"] = recommend_data["推荐序号"] + 1
-    
-    return recommend_data
-       
-def get_cust_list_from_history_order(city_uuid, product_code):
-    # 获取订单数据并处理
-    order_data = dao.get_eval_order_by_product(city_uuid, product_code)
-    order_data = order_data[["cust_code", "cust_name", "product_code", "product_name", "sale_qty", "sale_amt"]]
-    
-    # 确保cust_code是字符串类型
-    order_data["cust_code"] = order_data["cust_code"].astype(str)
-    
-    order_data = order_data.groupby(["cust_code", "cust_name", "product_code", "product_name"])[["sale_qty", "sale_amt"]].mean().reset_index()
-    order_data["sale_qty"] = order_data["sale_qty"].round(0).astype(int)
-    order_data = order_data.sort_values("sale_qty", ascending=False)
-    
-    # 读取推荐数据
-    recommend_data = pd.read_csv('./data/商户售卖推荐表.csv')
-    # recommend_data = recommend_data.drop(columns=["sale_qty"])
-    # 确保recommend_data中的cust_code也是字符串类型
-    recommend_data["商户编号"] = recommend_data["商户编号"].astype(str)
-    cust_ids = recommend_data.set_index("商户编号")
-    
-    # 执行合并操作
-    merge_data = order_data.join(cust_ids, on="cust_code", how="left")
-    merge_data = merge_data[["cust_code", "cust_name", "product_code", "product_name", "sale_qty", "推荐序号", "推荐系数", "历史月均销量"]]
-    merge_data = merge_data.rename(
-        columns={
-            "cust_code": "商户编号", 
-            "cust_name": "商户名称", 
-            "product_code": "卷烟编码", 
-            "product_name": "卷烟名称", 
-            "sale_qty": "月均销量"
-        }
-    )
-    return merge_data
-        
-if __name__ == "__main__":
-    order_data = get_cust_list_from_history_order("00000000000000000000000011445301", "350355")
-    order_data.to_csv("./data/eval.csv", index=False)