Sfoglia il codice sorgente

重新定义打标流程

yangzeyu 1 anno fa
parent
commit
b1d8be3b31
1 ha cambiato i file con 199 aggiunte e 0 eliminazioni
  1. 199 0
      models/rank/data/preprocess_test.py

+ 199 - 0
models/rank/data/preprocess_test.py

@@ -0,0 +1,199 @@
+from database import MySqlDao
+from models.rank.data.config import CustConfig, ProductConfig, OrderConfig
+import os
+import pandas as pd
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.utils import shuffle
+import numpy as np
+
class DataProcess():
    """Preprocessing / labelling pipeline for the rank-model training data.

    Loads the customer, product, order and shopping-district tables from
    MySQL, cleans them according to the config rules, scores each order
    record with a weighted sum of min-max-normalised features, labels each
    record against the median score of its product, joins customer/product
    features back in and writes the shuffled training samples to
    ``save_path``.
    """

    def __init__(self, city_uuid, save_path):
        self._mysql_dao = MySqlDao()
        self._save_res_path = save_path
        print("正在加载cust_info...")
        self._cust_data = self._mysql_dao.load_cust_data(city_uuid)
        print("正在加载product_info...")
        self._product_data = self._mysql_dao.load_product_data(city_uuid)
        print("正在加载order_info...")
        # Real order data is not wired up yet; use mocked orders for now.
        # self._order_data = self._mysql_dao.load_cust_data(city_uuid)
        self._order_data = self._mysql_dao.load_mock_order_data()
        print("正在加载shopping_info...")
        self._shopping_data = self._mysql_dao.load_shopping_data(city_uuid)

    def data_process(self):
        """Run the whole preprocessing pipeline and save the training data."""
        # Start from a clean output file.
        if os.path.exists(self._save_res_path):
            os.remove(self._save_res_path)

        # 1. Keep only the configured feature columns of each table.
        self._cust_data = self._cust_data[CustConfig.FEATURE_COLUMNS]
        self._product_data = self._product_data[ProductConfig.FEATURE_COLUMNS]
        self._order_data = self._order_data[OrderConfig.FEATURE_COLUMNS]

        # 2. Clean every table.
        self._clean_cust_data()
        self._clean_product_data()
        self._clean_order_data()
        self._clean_shopping_data()

        # 3. Score the order records by their weighted feature sum.
        #    (Fix: _calculate_score requires the frame to score; the
        #    original called it with no argument.)
        scored_data = self._calculate_score(self._order_data.copy())

        # 4. Label each record against its product's median score.
        #    (Fix: labeled_data requires the scored frame.)
        self.labeled_data(scored_data)

        # 5. Build and save the training samples.
        #    (Fix: the original called the undefined _generate_train_data.)
        self._generate_original_train_data()

    def _clean_cust_data(self):
        """Clean the customer table according to CustConfig.CLEANING_RULES."""
        for feature, rules in CustConfig.CLEANING_RULES.items():
            if rules["type"] == "num":
                # Coerce numeric-looking strings; unparseable values -> NaN.
                self._cust_data[feature] = pd.to_numeric(self._cust_data[feature], errors="coerce")

            if rules["method"] == "fillna":
                if rules["opt"] == "fill":
                    # Fill with the constant from the rule.
                    self._cust_data[feature] = self._cust_data[feature].fillna(rules["value"])
                elif rules["opt"] == "replace":
                    # Fill from another column named by the rule.
                    self._cust_data[feature] = self._cust_data[feature].fillna(self._cust_data[rules["value"]])
                elif rules["opt"] == "mean":
                    self._cust_data[feature] = self._cust_data[feature].fillna(self._cust_data[feature].mean())
                # Silence pandas' future-downcasting warning after fillna.
                self._cust_data[feature] = self._cust_data[feature].infer_objects(copy=False)

    def _clean_product_data(self):
        """Clean the product (cigarette) table per ProductConfig.CLEANING_RULES."""
        for feature, rules in ProductConfig.CLEANING_RULES.items():
            if rules["type"] == "num":
                self._product_data[feature] = pd.to_numeric(self._product_data[feature], errors="coerce")

            if rules["method"] == "fillna":
                if rules["opt"] == "fill":
                    self._product_data[feature] = self._product_data[feature].fillna(rules["value"])
                elif rules["opt"] == "mean":
                    self._product_data[feature] = self._product_data[feature].fillna(self._product_data[feature].mean())
                self._product_data[feature] = self._product_data[feature].infer_objects(copy=False)

    def _clean_order_data(self):
        """Drop duplicate order rows and zero-fill missing values."""
        self._order_data.drop_duplicates(inplace=True)
        self._order_data.fillna(0, inplace=True)
        self._order_data = self._order_data.infer_objects(copy=False)

    def _clean_shopping_data(self):
        """Fill the missing values of the shopping-district table."""
        # Geo/identity columns are not features; drop them up front.
        self._shopping_data.drop(["cust_uuid", "longitude", "latitude", "range_radius"], axis=1, inplace=True)
        remaining_cols = self._shopping_data.columns.drop(["city_uuid", "cust_code"])
        # Columns with at least one missing value ...
        col_with_missing = remaining_cols[self._shopping_data[remaining_cols].isnull().any()].tolist()
        # ... and columns that are missing entirely.
        col_all_missing = remaining_cols[self._shopping_data[remaining_cols].isnull().all()].to_list()
        # Partially missing columns can still use their own mean.
        col_partial_missing = list(set(col_with_missing) - set(col_all_missing))

        for col in col_partial_missing:
            self._shopping_data[col] = self._shopping_data[col].fillna(self._shopping_data[col].mean())

        # Fully missing columns have no mean to use; fall back to 0.
        for col in col_all_missing:
            self._shopping_data[col] = self._shopping_data[col].fillna(0).infer_objects(copy=False)

    def _generate_original_train_data(self):
        """Join labelled orders with features, shuffle and save to CSV."""
        union_data = self._union_order_cust_product()
        # Deterministic shuffle so repeated runs produce the same file.
        union_data = shuffle(union_data, random_state=42)
        union_data.to_csv(self._save_res_path, index=False)

    def _generate_pos_train_data(self):
        # TODO: build POS-based training samples.
        pass

    def _generate_shopping_train_data(self):
        # TODO: build shopping-district training samples.
        pass

    def _union_order_cust_product(self):
        """Join the labelled orders with the customer and product tables.

        Returns the inner-joined DataFrame; requires ``self._order_score``
        (set by :meth:`labeled_data`) to exist.
        """
        union_data = self._order_score.copy()
        union_data.rename(columns={"PRODUCT_CODE": "product_code"}, inplace=True)
        # Fix: the original drop() had no axis and would try to remove row
        # index labels (KeyError). Drop the columns, tolerating absent ones.
        union_data = union_data.drop(
            columns=["YLT_TURNOVER_RATE", "YLT_BAR_PACKAGE_SALE_OCC", "POS_PACKAGE_PRICE"],
            errors="ignore")
        cust_feats = self._cust_data.set_index("BB_RETAIL_CUSTOMER_CODE")
        product_feats = self._product_data.set_index("product_code")

        union_data = union_data.join(cust_feats, on="BB_RETAIL_CUSTOMER_CODE", how="inner")
        union_data = union_data.join(product_feats, on="product_code", how="inner")

        return union_data

    def _calculate_score(self, union_data):
        """Add a weighted ``score`` column to ``union_data`` and return it.

        The columns listed in ``OrderConfig.WEIGHTS`` are min-max normalised
        in place, then summed with their configured weights.
        """
        weight_cols = list(OrderConfig.WEIGHTS.keys())
        scaler = MinMaxScaler()
        union_data[weight_cols] = scaler.fit_transform(union_data[weight_cols])
        # Fix: score must come from the frame being scored, not from
        # self._order_score (which does not exist yet at this point).
        union_data["score"] = sum(union_data[feat] * weight
                                  for feat, weight in OrderConfig.WEIGHTS.items())

        return union_data

    def labeled_data(self, scored_data):
        """Label each scored record against its product's median score.

        A record gets label 1 when its score is >= the median score of all
        records sharing its product, else 0. Stores the result — sorted by
        score descending and reduced to (customer code, product_code,
        label) — in ``self._order_score`` and returns it.
        """
        # Fix: the original called the DataFrame (`scored_data(...)`);
        # the per-product median needs a groupby.
        product_medians = scored_data.groupby("PRODUCT_CODE")["score"].median().reset_index()
        product_medians.columns = ["PRODUCT_CODE", "median_score"]

        # Attach each record's product median.
        temp_data = pd.merge(scored_data, product_medians, on="PRODUCT_CODE")

        # 1: score >= product median, 0: below it.
        # Fix: the original compared columns of self._order_score, which has
        # neither "score" nor "median_score"; compare on the merged frame.
        temp_data["label"] = np.where(temp_data["score"] >= temp_data["median_score"], 1, 0)
        temp_data = temp_data.sort_values("score", ascending=False)

        self._order_score = temp_data[["BB_RETAIL_CUSTOMER_CODE", "PRODUCT_CODE", "label"]].copy()
        self._order_score.rename(columns={"PRODUCT_CODE": "product_code"}, inplace=True)
        return self._order_score
+    
if __name__ == '__main__':
    # Run the preprocessing pipeline for a fixed test city.
    processor = DataProcess(
        city_uuid="00000000000000000000000011445301",
        save_path="./models/rank/data/gbdt_data.csv",
    )
    processor.data_process()