2020 Digital China, Tianchi: Smart Ocean Construction, Top 1%


This is a Tianchi competition I took part in a while ago; final ranking: 20/3275. My solution is below, followed by a retrospective at the end.

Solution


# coding: utf-8

# In[1]:


import numpy as np
import pandas as pd
import lightgbm as lgb
import math
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import os
import random
np.random.seed(78)
random.seed(78)


# In[2]:


features = []  # global accumulator: read_information appends one flat row of features per ship file
def dis_lat_lon(lat1, lon1, lat2, lon2):
    # Haversine great-circle distance (km) between two points given in degrees
    R = 6373.0  # approximate Earth radius in km
    lat1 = math.radians(lat1)
    lon1 = math.radians(lon1)
    lat2 = math.radians(lat2)
    lon2 = math.radians(lon2)
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    distance = R * c
    return distance
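
# Quick sanity check with hand-picked values (not from the competition data):
# one degree of latitude is roughly 111 km on this spherical Earth model.
assert abs(dis_lat_lon(0.0, 0.0, 1.0, 0.0) - 111.2) < 0.5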

def produce_feature(df, train):
    # Count/ratio statistics: train is the full time window, df is one speed band within it
    nums = len(df)
    mean = np.mean(df['速度'])
    ratio = len(df) / len(train) if len(train) != 0 else 0
    std = np.std(df['速度'])
    v_ = df['速度'].quantile(0.75)

    return nums, mean, ratio, std, v_


def angle(a, b, c):
    # Angle (degrees) at point b formed by three consecutive track points a, b, c;
    # 180 means the track is straight, smaller values mean sharper turns
    ab = [aa - bb for aa, bb in zip(a, b)]  # vector b -> a
    bc = [bb - cc for cc, bb in zip(c, b)]  # vector c -> b

    nab = np.sqrt(sum((x ** 2.0 for x in ab)))
    ab = [x / nab for x in ab]

    nbc = np.sqrt(sum((x ** 2.0 for x in bc)))
    bc = [x / nbc for x in bc]
    scal = sum((aa * bb for aa, bb in zip(ab, bc)))
    if scal > 1:
        scal = 1
    elif scal < -1:
        scal = -1
    angle = int(math.acos(scal) * 180 / math.pi)
    angle = 180 - angle
    return angle
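
# Convention check for angle(), with hand-picked points: straight-line motion
# returns 180; sharper turns return smaller values. The second assert allows one
# degree of slack for the int() truncation inside angle().
assert angle([0, 0], [1, 0], [2, 0]) == 180
assert abs(angle([0, 0], [1, 0], [1, 1]) - 90) <= 1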


def produce_feature_v_xy(df):
    # Coordinate statistics for one time-of-day segment
    df = df.copy()  # work on a copy so adding the parsed time column does not touch the caller's slice
    k = df['y'] / df['x']  # slope of each point w.r.t. the origin
    k_min = k.min()
    k_max = k.max()
    #k_mean = k.mean()
    #x_50_ = df['x'].quantile(0.5)
    x_min_ = df['x'].min()
    x_max_ = df['x'].max()
    y_min_ = df['y'].min()
    y_max_ = df['y'].max()
    x_max_y_min_ = df['x'].max() - df['y'].min()
    y_max_x_min_ = df['y'].max() - df['x'].min()
    x_25_ = df['x'].quantile(0.25)
    y_75_ = df['y'].quantile(0.75)
    if len(df) <= 1:
        xy_cov_ = 0
    else:
        xy_cov_ = df['x'].cov(df['y'])
    df['time'] = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')
    t_diff = df['time'].diff().iloc[1:].dt.total_seconds()
    x_diff = df['x'].diff().iloc[1:].abs()
    y_diff = df['y'].diff().iloc[1:].abs()
    x_a_mean = (x_diff / t_diff).mean()
    y_a_mean = (y_diff / t_diff).mean()
    xy_a_ = np.sqrt(x_a_mean ** 2 + y_a_mean ** 2)
    return k_min,k_max,x_min_, x_max_, y_min_, y_max_, x_max_y_min_, y_max_x_min_, x_25_, y_75_, xy_cov_, xy_a_


def produce_feature_ang_ext(df):
    # Angle and distance features within one time-of-day segment
    df['time'] = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')
    df = df.sort_values(by='time')
    df['hour'] = df['time'].dt.hour
    df_tortuosity = df[['x', 'y', '方向', '速度', 'hour']].values.tolist()
    if len(df_tortuosity) > 1:
        ang_list = [0]
        dis_list = [0]
        for i in range(1, len(df_tortuosity) - 1):
            a = [df_tortuosity[i - 1][0], df_tortuosity[i - 1][1]]
            b = [df_tortuosity[i][0], df_tortuosity[i][1]]
            c = [df_tortuosity[i + 1][0], df_tortuosity[i + 1][1]]
#             dis = np.sqrt((float((a[0] - b[0]) ** 2) + float((a[1] - b[1]) ** 2)))
            dis = dis_lat_lon(a[0], a[1], b[0], b[1])
            dis_list.append(dis)
            if a == b or b == c or a == c:
                ang_list.append(0)
            else:
                res = angle(a, b, c)
                ang_list.append(int(res))

#         dis_list.append(np.sqrt((float((df_tortuosity[-1][0] - df_tortuosity[-2][0]) ** 2) + float(
#             (df_tortuosity[-1][1] - df_tortuosity[-2][1]) ** 2))))
        last_dis = dis_lat_lon(df_tortuosity[-1][0], df_tortuosity[-1][1], df_tortuosity[-2][0], df_tortuosity[-2][1])
        dis_list.append(last_dis)
        ang_list.append(int(ang_list[-1]))
        num_ang_all = len(ang_list)
        num_ang_0_100 = len([x for x in ang_list if x <= 100])
        ratio_ang_0_100 = num_ang_0_100 / num_ang_all

        num_ang_10_150 = len([x for x in ang_list if x > 10 and x < 150])
        ratio_ang_10_150 = num_ang_10_150 / num_ang_all

        num_ang_100_165 = len([x for x in ang_list if x > 100 and x < 165])
        ratio_ang_100_165 = num_ang_100_165 / num_ang_all

        df['est_d'] = ang_list
        df['est_dis'] = dis_list

        t_diff = df['time'].diff().iloc[1:].dt.total_seconds()

        t = [0]
        t.extend(t_diff.values.tolist())
        df['est_t'] = [x / 3600 for x in t]
        df['est_v'] = df['est_d'] / df['est_t']  # angular velocity (est_d is the turning angle), matching est_v_d in read_information
        beg_end = dis_lat_lon(df_tortuosity[0][0], df_tortuosity[0][1], df_tortuosity[-1][0], df_tortuosity[-1][1])
    else:
        # Fewer than two points: none of these features are defined
        return (0,) * 11
    return df['est_v'].mean(), df['est_v'].std(), df['est_v'].quantile(0.75), df['est_d'].mean(), num_ang_0_100, ratio_ang_0_100, num_ang_10_150, ratio_ang_10_150, num_ang_100_165, ratio_ang_100_165, beg_end

def x_y_area_count(df,all_df):
    num_all = len(all_df)
    num_ = len(df)
    num_ratio_ = num_/num_all
    v_mean_c_ = df['速度'].mean()
    v_std_c_ = df['速度'].std()
    d_mean_c_ = df['方向'].mean()
    return [num_,num_ratio_,v_mean_c_,v_std_c_,d_mean_c_]

def read_information(path, know_type=True):
    df = pd.read_csv(path)
#     print(path)
    if know_type:
        df.columns = ['ship', 'x', 'y', '速度', '方向',  'time', 'type']
    else:
        df.columns = ['ship', 'x', 'y', '速度', '方向', 'time']
    # Angle and distance features over the full track
    df['time'] = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')
    df = df.sort_values(by='time')
    df['hour'] = df['time'].dt.hour

    df_tortuosity = df[['x', 'y', '速度', '方向', 'hour']].values.tolist()
    ang_list = [0]
    dis_list = [0]
    for i in range(1, len(df_tortuosity) - 1):
        a = [df_tortuosity[i - 1][0], df_tortuosity[i - 1][1]]
        b = [df_tortuosity[i][0], df_tortuosity[i][1]]
        c = [df_tortuosity[i + 1][0], df_tortuosity[i + 1][1]]
#         dis = np.sqrt((float((a[0] - b[0]) ** 2) + float((a[1] - b[1]) ** 2)))
        dis = dis_lat_lon(a[0], a[1], b[0], b[1])
        dis_list.append(dis)
        if a == b or b == c or a == c:
            ang_list.append(0)
        else:
            res = angle(a, b, c)
            ang_list.append(int(res))
#     dis_list.append(np.sqrt((float((df_tortuosity[-1][0] - df_tortuosity[-2][0]) ** 2) + float((df_tortuosity[-1][1] - df_tortuosity[-2][1]) ** 2))))
    dis_list.append(dis_lat_lon(df_tortuosity[-1][0], df_tortuosity[-1][1], df_tortuosity[-2][0], df_tortuosity[-2][1]))
    ang_list.append(int(ang_list[-1]))
    num_ang_all = len(ang_list)
    num_ang_0_100 = len([x for x in ang_list if x <= 100])
    ratio_ang_0_100 = num_ang_0_100 / num_ang_all

    num_ang_10_150 = len([x for x in ang_list if x > 10 and x < 150])
    ratio_ang_10_150 = num_ang_10_150 / num_ang_all

    num_ang_100_165 = len([x for x in ang_list if x > 100 and x < 165])
    ratio_ang_100_165 = num_ang_100_165 / num_ang_all

    df['est_d'] = ang_list
    df['d_diff'] = df['est_d'] - df['方向']
    df['est_dis'] = dis_list

    t_diff = df['time'].diff().iloc[1:].dt.total_seconds()

    t = [0]
    t.extend(t_diff.values.tolist())

    df['est_t'] = [x / 3600 for x in t]

    df['est_v_dis'] = df['est_dis'] / df['est_t']  # estimated speed from consecutive positions
    df['est_v_d'] = df['est_d'] / df['est_t']  # angular velocity
    df['v_diff'] = df['速度'] - df['est_v_dis']  # reported speed minus estimated speed
#     beg_end = np.sqrt((float((df_tortuosity[0][0] - df_tortuosity[-1][0]) ** 2) + float(
#         (df_tortuosity[0][1] - df_tortuosity[-1][1]) ** 2)))
    beg_end = dis_lat_lon(df_tortuosity[0][0], df_tortuosity[0][1], df_tortuosity[-1][0], df_tortuosity[-1][1])
    features.append(int(df['ship'].unique()))
    features.append(df['est_v_d'].mean())
    features.append(df['est_v_d'].std())
    features.append(df['est_v_d'].quantile(0.75))
    
    features.append(df['v_diff'].mean())
    # features.append(df['v_diff'].max())
    # features.append(df['v_diff'].std())
    # features.append(df['v_diff'].quantile(0.75))
    
    features.append(df['d_diff'].mean())
    features.append(df['d_diff'].max())
    features.append(df['d_diff'].min())
    # features.append(df['d_diff'].std())
    # features.append(df['d_diff'].quantile(0.75))
    
    features.append(df['est_d'].mean())
    features.append(num_ang_0_100)
    features.append(ratio_ang_0_100)
    features.append(num_ang_10_150)
    features.append(ratio_ang_10_150)
    features.append(num_ang_100_165)
    features.append(ratio_ang_100_165)
    features.append(beg_end)
    night1 = df[19 <= df['hour']]
    night1 = night1[night1['hour'] < 23]
    night2_1 = df[23 <= df['hour']]
    night2_2 = df[df['hour'] <= 3]
    night2 = pd.concat([night2_1, night2_2], axis=0)
    night = pd.concat([night1, night2_1, night2_2], axis=0)

    day1 = df[3 < df['hour']]
    day1 = day1[day1['hour'] < 10]
    day2 = df[10 <= df['hour']]
    day2 = day2[day2['hour'] < 16]
    day3 = df[16 <= df['hour']]
    day3 = day3[day3['hour'] < 19]
    day = pd.concat([day1, day2, day3], axis=0)

    # Re-compute the statistics after splitting the track into time-of-day segments
    k_min_1,k_max_1,x_min_n_1, x_max_n_1, y_min_n_1, y_max_n_1, x_max_y_min_n_1, y_max_x_min_n_1, x_25_n_1, y_75_n_1, xy_cov_n_1, xy_a_n_1 = produce_feature_v_xy(night1)
    k_min_2,k_max_2,x_min_n_2, x_max_n_2, y_min_n_2, y_max_n_2, x_max_y_min_n_2, y_max_x_min_n_2, x_25_n_2, y_75_n_2, xy_cov_n_2, xy_a_n_2 = produce_feature_v_xy(night2)
    k_min_3,k_max_3,x_min_d_1, x_max_d_1, y_min_d_1, y_max_d_1, x_max_y_min_d_1, y_max_x_min_d_1, x_25_d_1, y_75_d_1, xy_cov_d_1, xy_a_d_1 = produce_feature_v_xy(day1)
    k_min_4,k_max_4,x_min_d_2, x_max_d_2, y_min_d_2, y_max_d_2, x_max_y_min_d_2, y_max_x_min_d_2, x_25_d_2, y_75_d_2, xy_cov_d_2, xy_a_d_2 = produce_feature_v_xy(day2)
    k_min_5,k_max_5,x_min_d_3, x_max_d_3, y_min_d_3, y_max_d_3, x_max_y_min_d_3, y_max_x_min_d_3, x_25_d_3, y_75_d_3, xy_cov_d_3, xy_a_d_3 = produce_feature_v_xy(day3)
    features.extend(
        [k_min_1,k_max_1,x_min_n_1, x_max_n_1, y_min_n_1, y_max_n_1, x_max_y_min_n_1, y_max_x_min_n_1, x_25_n_1, y_75_n_1, xy_cov_n_1, xy_a_n_1])
    features.extend(
        [k_min_2,k_max_2,x_min_n_2, x_max_n_2, y_min_n_2, y_max_n_2, x_max_y_min_n_2, y_max_x_min_n_2, x_25_n_2, y_75_n_2, xy_cov_n_2,
         xy_a_n_2])
    features.extend(
        [k_min_3,k_max_3,x_min_d_1, x_max_d_1, y_min_d_1, y_max_d_1, x_max_y_min_d_1, y_max_x_min_d_1, x_25_d_1, y_75_d_1, xy_cov_d_1,
         xy_a_d_1])
    features.extend(
        [k_min_4,k_max_4,x_min_d_2, x_max_d_2, y_min_d_2, y_max_d_2, x_max_y_min_d_2, y_max_x_min_d_2, x_25_d_2, y_75_d_2, xy_cov_d_2,
         xy_a_d_2])
    features.extend(
        [k_min_5,k_max_5,x_min_d_3, x_max_d_3, y_min_d_3, y_max_d_3, x_max_y_min_d_3, y_max_x_min_d_3, x_25_d_3, y_75_d_3, xy_cov_d_3,
         xy_a_d_3])
    
    k_min_n,k_max_n,x_min_n_, x_max_n_, y_min_n_, y_max_n_, x_max_y_min_n_, y_max_x_min_n_, x_25_n_, y_75_n_, xy_cov_n_, xy_a_n_ = produce_feature_v_xy(night)
    k_min_d,k_max_d,x_min_d_, x_max_d_, y_min_d_, y_max_d_, x_max_y_min_d_, y_max_x_min_d_, x_25_d_, y_75_d_, xy_cov_d_, xy_a_d_ = produce_feature_v_xy(day)
    features.extend(
        [k_min_n,k_max_n,x_min_n_, x_max_n_, y_min_n_, y_max_n_, x_max_y_min_n_, y_max_x_min_n_, x_25_n_, y_75_n_, xy_cov_n_, xy_a_n_])
    features.extend(
        [k_min_d,k_max_d,x_min_d_, x_max_d_, y_min_d_, y_max_d_, x_max_y_min_d_, y_max_x_min_d_, x_25_d_, y_75_d_, xy_cov_d_, xy_a_d_])
    
    ### Angle features, split by night/day ###
    est_v_m_1, est_v_s_1, est_v_75_1, est_d_1, num_ang_0_100_1, ratio_ang_0_100_1, num_ang_10_150_1, ratio_ang_10_150_1, num_ang_100_165_1, ratio_ang_100_165_1, beg_end_1 = produce_feature_ang_ext(night)
    est_v_m_2, est_v_s_2, est_v_75_2, est_d_2, num_ang_0_100_2, ratio_ang_0_100_2, num_ang_10_150_2, ratio_ang_10_150_2, num_ang_100_165_2, ratio_ang_100_165_2, beg_end_2 = produce_feature_ang_ext(day)

    features.extend([est_v_m_1, est_v_s_1, est_v_75_1, est_d_1, num_ang_0_100_1, ratio_ang_0_100_1, num_ang_10_150_1,
                     ratio_ang_10_150_1, num_ang_100_165_1, ratio_ang_100_165_1, beg_end_1])
    features.extend([est_v_m_2, est_v_s_2, est_v_75_2, est_d_2, num_ang_0_100_2, ratio_ang_0_100_2, num_ang_10_150_2,
                     ratio_ang_10_150_2, num_ang_100_165_2, ratio_ang_100_165_2, beg_end_2])
    
    
    # Global statistics over the whole track
    features.append(df['x'].min())
    features.append(df['x'].max())
    features.append(df['x'].mean())
    features.append(df['x'].quantile(0.25))
    
    features.append(df['y'].min())
    features.append(df['y'].max())
    features.append(df['y'].mean())
    features.append(df['y'].quantile(0.75))

    features.append(df['x'].cov(df['y']))

    df['time'] = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')
    t_diff = df['time'].diff().iloc[1:].dt.total_seconds()
    x_diff = df['x'].diff().iloc[1:].abs()
    y_diff = df['y'].diff().iloc[1:].abs()
    dis = sum(np.sqrt(x_diff ** 2 + y_diff ** 2))
    x_a_mean = (x_diff / t_diff).mean()
    y_a_mean = (y_diff / t_diff).mean()
    features.append(np.sqrt(x_a_mean ** 2 + y_a_mean ** 2))

    features.append(df['速度'].mean())
    features.append(df['速度'].std())
    features.append(df['速度'].quantile(0.75))

    features.append(df['方向'].mean())

    if know_type:
        # Encode the label: 拖網 (trawl) -> 2, 刺網 (gillnet) -> 1, 圍網 (purse seine) -> 0.
        # A dict lookup fails fast on an unexpected label instead of silently
        # appending nothing, which would corrupt the reshape into rows later.
        features.append({'拖網': 2, '刺網': 1, '圍網': 0}[df['type'].iloc[0]])


# In[3]:


train_path = r"hy_round2_train_20200225/"
train_files = os.listdir(train_path)
train_files = list(np.sort(train_files))
length_tr = len(train_files)
for files in train_files:
    path = train_path + str(files)
    read_information(path, know_type=True)
train_data = pd.DataFrame(np.array(features).reshape(length_tr, int(len(features) / length_tr)))
train_data.columns = ['ship', 'est_v_m', 'est_v_s', 'est_v_75', 'v_diff_mean', 
                      'd_diff_mean', 'd_diff_max', 'd_diff_min', 
                      'est_d', 'num_ang_0_100', 'ratio_ang_0_100', 'num_ang_10_150',
                      'ratio_ang_10_150', 'num_ang_100_165', 'ratio_ang_100_165', 'beg_end',
                      'k_min_1','k_max_1','x_min_n_1', 'x_max_n_1', 'y_min_n_1', 'y_max_n_1', 'x_max_y_min_n_1', 'y_max_x_min_n_1',
                      'x_25_n_1', 'y_75_n_1', 'xy_cov_n_1', 'xy_a_n_1',
                      'k_min_2','k_max_2','x_min_n_2', 'x_max_n_2', 'y_min_n_2', 'y_max_n_2', 'x_max_y_min_n_2', 'y_max_x_min_n_2',
                      'x_25_n_2', 'y_75_n_2', 'xy_cov_n_2', 'xy_a_n_2',
                      'k_min_3','k_max_3','x_min_d_1', 'x_max_d_1', 'y_min_d_1', 'y_max_d_1', 'x_max_y_min_d_1', 'y_max_x_min_d_1',
                      'x_25_d_1', 'y_75_d_1', 'xy_cov_d_1', 'xy_a_d_1',
                      'k_min_4','k_max_4','x_min_d_2', 'x_max_d_2', 'y_min_d_2', 'y_max_d_2', 'x_max_y_min_d_2', 'y_max_x_min_d_2',
                      'x_25_d_2', 'y_75_d_2', 'xy_cov_d_2', 'xy_a_d_2',
                      'k_min_5','k_max_5','x_min_d_3', 'x_max_d_3', 'y_min_d_3', 'y_max_d_3', 'x_max_y_min_d_3', 'y_max_x_min_d_3',
                      'x_25_d_3', 'y_75_d_3', 'xy_cov_d_3', 'xy_a_d_3',
                      'k_min_n','k_max_n','x_min_n_', 'x_max_n_', 'y_min_n_', 'y_max_n_', 'x_max_y_min_n_', 'y_max_x_min_n_',
                      'x_25_n_', 'y_75_n_', 'xy_cov_n_', 'xy_a_n_',
                      'k_min_d','k_max_d','x_min_d_', 'x_max_d_', 'y_min_d_', 'y_max_d_', 'x_max_y_min_d_', 'y_max_x_min_d_',
                      'x_25_d_', 'y_75_d_', 'xy_cov_d_', 'xy_a_d_',
                      'est_v_m_1', 'est_v_s_1', 'est_v_75_1', 'est_d_1', 'num_ang_0_100_1', 'ratio_ang_0_100_1',
                      'num_ang_10_150_1', 'ratio_ang_10_150_1', 'num_ang_100_165_1', 'ratio_ang_100_165_1', 'beg_end_1', 
                      'est_v_m_2', 'est_v_s_2', 'est_v_75_2', 'est_d_2', 'num_ang_0_100_2', 'ratio_ang_0_100_2',
                      'num_ang_10_150_2', 'ratio_ang_10_150_2', 'num_ang_100_165_2', 'ratio_ang_100_165_2', 'beg_end_2', 
                      'x_min', 'x_max', 'x_mean', 'x_1/4', 'y_min', 'y_max', 'y_mean', 'y_3/4', 'xy_cov', 'a',
                      'v_mean', 'v_std', 'v_3/4', 'd_mean', 'type']
train_data.fillna(0, inplace=True)


# In[5]:


features = []
test_path = r'hy_round2_testA_20200225/'
test_files = os.listdir(test_path)
test_files = list(np.sort(test_files))
length_te = len(test_files)
for files in test_files:
    path = test_path + str(files)
    read_information(path, know_type=False)
test_data = pd.DataFrame(np.array(features).reshape(length_te, int(len(features) / length_te)))
test_data.columns = ['ship', 'est_v_m', 'est_v_s', 'est_v_75', 'v_diff_mean', 
                      'd_diff_mean', 'd_diff_max', 'd_diff_min', 
                      'est_d', 'num_ang_0_100', 'ratio_ang_0_100', 'num_ang_10_150',
                      'ratio_ang_10_150', 'num_ang_100_165', 'ratio_ang_100_165', 'beg_end',
                      'k_min_1','k_max_1','x_min_n_1', 'x_max_n_1', 'y_min_n_1', 'y_max_n_1', 'x_max_y_min_n_1', 'y_max_x_min_n_1',
                      'x_25_n_1', 'y_75_n_1', 'xy_cov_n_1', 'xy_a_n_1',
                      'k_min_2','k_max_2','x_min_n_2', 'x_max_n_2', 'y_min_n_2', 'y_max_n_2', 'x_max_y_min_n_2', 'y_max_x_min_n_2',
                      'x_25_n_2', 'y_75_n_2', 'xy_cov_n_2', 'xy_a_n_2',
                      'k_min_3','k_max_3','x_min_d_1', 'x_max_d_1', 'y_min_d_1', 'y_max_d_1', 'x_max_y_min_d_1', 'y_max_x_min_d_1',
                      'x_25_d_1', 'y_75_d_1', 'xy_cov_d_1', 'xy_a_d_1',
                      'k_min_4','k_max_4','x_min_d_2', 'x_max_d_2', 'y_min_d_2', 'y_max_d_2', 'x_max_y_min_d_2', 'y_max_x_min_d_2',
                      'x_25_d_2', 'y_75_d_2', 'xy_cov_d_2', 'xy_a_d_2',
                      'k_min_5','k_max_5','x_min_d_3', 'x_max_d_3', 'y_min_d_3', 'y_max_d_3', 'x_max_y_min_d_3', 'y_max_x_min_d_3',
                      'x_25_d_3', 'y_75_d_3', 'xy_cov_d_3', 'xy_a_d_3',
                      'k_min_n','k_max_n','x_min_n_', 'x_max_n_', 'y_min_n_', 'y_max_n_', 'x_max_y_min_n_', 'y_max_x_min_n_',
                      'x_25_n_', 'y_75_n_', 'xy_cov_n_', 'xy_a_n_',
                      'k_min_d','k_max_d','x_min_d_', 'x_max_d_', 'y_min_d_', 'y_max_d_', 'x_max_y_min_d_', 'y_max_x_min_d_',
                      'x_25_d_', 'y_75_d_', 'xy_cov_d_', 'xy_a_d_',
                      'est_v_m_1', 'est_v_s_1', 'est_v_75_1', 'est_d_1', 'num_ang_0_100_1', 'ratio_ang_0_100_1',
                      'num_ang_10_150_1', 'ratio_ang_10_150_1', 'num_ang_100_165_1', 'ratio_ang_100_165_1', 'beg_end_1', 
                      'est_v_m_2', 'est_v_s_2', 'est_v_75_2', 'est_d_2', 'num_ang_0_100_2', 'ratio_ang_0_100_2',
                      'num_ang_10_150_2', 'ratio_ang_10_150_2', 'num_ang_100_165_2', 'ratio_ang_100_165_2', 'beg_end_2', 
                      'x_min', 'x_max', 'x_mean', 'x_1/4', 'y_min', 'y_max', 'y_mean', 'y_3/4', 'xy_cov', 'a',
                      'v_mean', 'v_std', 'v_3/4', 'd_mean']
test_data.fillna(0, inplace=True)


# In[4]:


kind = train_data.type
train_data = train_data.drop('type', axis=1)


features = [x for x in train_data.columns]  # note: from here on the global name "features" holds column names, not feature values
train_data = train_data[features]
#test_data = test_data[features]


# In[5]:


x_train, x_test, y_train, y_test = train_test_split(train_data, kind, test_size=0.1, random_state=78)


# In[6]:


params = {
    'learning_rate': 0.2036,
    'max_depth': 6,  # .6787,
    'boosting': 'gbdt',
    'objective': 'multiclass',
    'n_estimators': 5561,
    'num_class': 3,
    'feature_fraction': .5242,
    'bagging_fraction': .3624,
    'class_weight': {0: 3, 1: 5, 2: 2.5},
    'seed':78
    # 'early_stopping_rounds': 100
}

llf = lgb.LGBMClassifier(**params)
llf.fit(x_train, y_train)
weight_lgb = f1_score(y_test, llf.predict(x_test), average='macro')


details = []
answers = []
scores = []
sk = StratifiedKFold(n_splits=20, shuffle=True, random_state=2020)
for train, test in sk.split(train_data, kind):
    x_train = train_data.iloc[train]
    y_train = kind.iloc[train]
    x_test = train_data.iloc[test]
    y_test = kind.iloc[test]

    llf.fit(x_train, y_train)
    pred_llf = llf.predict(x_test)
    weight_lgb = f1_score(y_test, pred_llf, average='macro')

    prob_lgb = llf.predict_proba(x_test)
    prob_end = prob_lgb
    score = f1_score(y_test, np.argmax(prob_end, axis=1), average='macro')
    scores.append(score)
    
    details.append(score)
    details.append(weight_lgb)

    #answers.append(llf.predict(test_data))
    print('score: ', score)
print(np.mean(scores))  # mean macro-F1 across the 20 folds


# In[7]:


#print(answers)
# The parameters below came from Bayesian optimization (the tuning run itself is not shown here)
from sklearn import metrics
params = {
    'learning_rate': 0.2036,
    'max_depth': 6,  # .6787,
    'boosting': 'gbdt',
    'objective': 'multiclass',
    'n_estimators': 5561,
    'num_class': 3,
    'feature_fraction': .5242,
    'bagging_fraction': .3624,
    'early_stopping_rounds': 100
}
fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

X = train_data.copy()
y = kind
models = []
#pred = np.zeros((len(test_data),3))
oof = np.zeros((len(X), 3))
for index, (train_idx, val_idx) in enumerate(fold.split(X, y)):

    train_set = lgb.Dataset(X.iloc[train_idx], y.iloc[train_idx])
    val_set = lgb.Dataset(X.iloc[val_idx], y.iloc[val_idx])
    #print(y.iloc[train_idx])

    model = lgb.train(params, train_set, valid_sets=[train_set, val_set], verbose_eval=100)
    models.append(model)
    #print(X.iloc[val_idx][0:5])
    val_pred = model.predict(X.iloc[val_idx])
    oof[val_idx] = val_pred
    #print('val_pred',val_pred[0:5])
    val_y = y.iloc[val_idx]
    val_pred = np.argmax(val_pred, axis=1)
    #print('val_y',val_y[0:5])
    #print('val_pred',val_pred[0:5])
    print(index, 'val f1', metrics.f1_score(val_y, val_pred, average='macro'))
    # 0.8695539641133697
    # 0.8866211724839532

    #test_pred = model.predict(test_data)
    #pred += test_pred/5
oof = np.argmax(oof, axis=1)
print('oof f1', metrics.f1_score(y, oof, average='macro'))  # y_true first, by sklearn convention
# 0.8701544575329372
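

# A minimal sketch of how the Bayesian-optimization tuning could look with the
# bayes_opt package; the objective, bounds, and iteration counts here are
# illustrative assumptions, not the exact search that produced the values above.
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score

def lgb_cv(learning_rate, max_depth, feature_fraction, bagging_fraction):
    # Score one candidate parameter set by 5-fold macro-F1 cross-validation.
    clf = lgb.LGBMClassifier(
        objective='multiclass', num_class=3, n_estimators=500, seed=78,
        learning_rate=learning_rate, max_depth=int(max_depth),
        feature_fraction=feature_fraction, bagging_fraction=bagging_fraction)
    return cross_val_score(clf, train_data, kind, cv=5, scoring='f1_macro').mean()

optimizer = BayesianOptimization(
    f=lgb_cv,
    pbounds={'learning_rate': (0.01, 0.3), 'max_depth': (3, 8),
             'feature_fraction': (0.3, 1.0), 'bagging_fraction': (0.3, 1.0)},
    random_state=78)
# optimizer.maximize(init_points=5, n_iter=25)
# print(optimizer.max)  # best score and the parameter dict that produced it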


# In[8]:


x_train, x_test, y_train, y_test = train_test_split(train_data, kind, test_size=0.1, random_state=78)
params = {
    'learning_rate': 0.2036,
    'max_depth': 6,  # .6787,
    'boosting': 'gbdt',
    'objective': 'multiclass',
    'n_estimators': 5561,
    'num_class': 3,
    'feature_fraction': .5242,
    'bagging_fraction': .3624,
    'class_weight': {0: 3, 1: 5, 2: 2.5},
    'seed':78
    # 'early_stopping_rounds': 100
}

llf = lgb.LGBMClassifier(**params)
llf.fit(x_train, y_train)
weight_lgb = f1_score(y_test, llf.predict(x_test), average='macro')
print(weight_lgb)

Summary

Competition takeaways: all kinds of reasonable, effective cross features can be expanded in batches, and grounding them in the business context usually makes them accurate. Build features in batches (not one at a time, and do not try to learn the test set), but watch for overfitting the whole time: offline/online data and the A/B leaderboards may differ, so avoid features that only fit the training set. Use sound validation: nested 10-fold CV, or Lin Youxi's (林有夕) method of using AUC to verify the gap (sketched just below).
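
A minimal sketch of that AUC-based gap check (adversarial validation), assuming the train_data/test_data frames built above; the function name is mine, not code we actually ran. Label train rows 0 and test rows 1, then see how well a model can separate them:

import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import cross_val_score

def adversarial_auc(train_df, test_df):
    # AUC near 0.5: train and test feature distributions match, so offline CV
    # should track the leaderboard. AUC near 1.0: they differ, and offline
    # scores will be misleading.
    X = pd.concat([train_df, test_df], axis=0, ignore_index=True)
    y = np.array([0] * len(train_df) + [1] * len(test_df))
    clf = lgb.LGBMClassifier(n_estimators=100, random_state=78)
    return cross_val_score(clf, X, y, cv=5, scoring='roc_auc').mean()

# e.g. adversarial_auc(train_data.drop('ship', axis=1), test_data.drop('ship', axis=1))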

Competition retrospective:

  1. Explored data visualization and cleaning (overfit badly: we cleaned using the labels (effectively cheating) and ignored that offline and online data must get identical processing).
  2. Explored ways of using the data and simply threw all of it in (too much noise, severe overfitting).
  3. Started working properly and building features, but had not yet found an effective way to construct them.
  4. Built features on a solid basis (papers, consulting fishermen).
  5. Things we tried:
    • Model ensembling (xgb, cat, lgb): ineffective
    • Feature selection, by feature importance and by RFE: both ineffective
    • Data cleaning: ineffective
    • Bayesian optimization: gained 6-7 thousandths of macro F1
    • Class imbalance: adjusting lgb/xgb class weights worked; SMOTE and the like did not
  6. Feature engineering:
    • Adding statistical features: effective
    • Splitting features by time segment: effective (different feature families split over different time segments)
    • Turning-angle features: effective
    • "Magic" combination features: effective
    • Splitting angle features by day/night and coordinate features over five time segments: effective (other splits were not)
    • Tuning thresholds: ineffective
    • Features whose meaning is close but not precise enough: ineffective
    • Distance features: ineffective
  7. What needs improving:
    • Offline and online scores disagreed; we needed a reliable validation scheme (one of the most important points)
    • Ensembling needs a genuinely different model, e.g. an NN
    • Feature selection
    • We ignored the difference between the A and B leaderboards, so the absolute features we built overfit
    • Leaky features such as ship caused overfitting
    • We never found an effective feature-selection method
  8. Methods that worked for other teams:
    • Anticipating the A/B leaderboard difference (building relative rather than absolute features)
    • Map-matching features
    • Distance-to-coastline features
    • Two-level validation works better
    • Using AUC to measure the offline/online gap
    • word2vec over trajectories (a sketch follows this list)
    • Ensembling with an NN
    • Building features in batches, guarding against overfitting
    • Embedding techniques
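
One of those ideas, word2vec over trajectories, can be sketched as follows: discretize each ship's positions into grid-cell tokens, train word2vec on the token sequences, and average the vectors per ship. A hedged illustration with gensim; the grid size, vector size, and the per_ship_dfs list are my assumptions, not any team's actual setup:

import numpy as np
from gensim.models import Word2Vec  # gensim >= 4 (use size= instead of vector_size= on gensim 3)

def trajectory_tokens(df, grid=0.1):
    # Map each (x, y) position to a coarse grid-cell token such as "123_456".
    xs = (df['x'] // grid).astype(int).astype(str)
    ys = (df['y'] // grid).astype(int).astype(str)
    return (xs + '_' + ys).tolist()

# sentences = [trajectory_tokens(df) for df in per_ship_dfs]  # one token list per ship
# model = Word2Vec(sentences, vector_size=10, window=5, min_count=1, sg=1, seed=78)
# ship_vec = np.mean([model.wv[t] for t in sentences[0]], axis=0)  # per-ship embedding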

Teammates' notes:

  1. 8729: statistics ('max', 'min', 'mean', 'std', 'skew', 'sum') over the raw data (x, y, v, d, t), with lgb (plus an xgb+lgb+cgb blend)
  2. Full information: xy + hour + weekend fed straight into lgb
  3. 8729 + xy_by_t (fine-grained and overall) + v_by_t + xy_by_v + the area spanned by max/min xy
  4. Added trajectory features (turning angles, pairwise distance, est_v) with splits and statistics at different thresholds, plus splits over t
  5. Latitude/longitude statistics, v_diff, d_diff
  6. Magic features
  7. Feature selection: RFE (a minimal sketch below)
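
A minimal RFE sketch with sklearn; the feature count and step size are illustrative assumptions:

import lightgbm as lgb
from sklearn.feature_selection import RFE

# Recursively drop the weakest features according to LightGBM's importances.
selector = RFE(lgb.LGBMClassifier(n_estimators=100, random_state=78),
               n_features_to_select=60, step=5)
# selector.fit(train_data.drop('ship', axis=1), kind)
# selected_cols = train_data.drop('ship', axis=1).columns[selector.support_]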