# 数据分析工具 from __future__ import division # 除的结果是小数 import pandas as pd import numpy as np from scipy import stats def fill_fre_top_5(x): # 频数前五位的函数:若不足5,则nan补齐 if (len(x)) <= 5: new_array = np.full(5, np.nan) new_array[0:len(x)] = x return new_array def eda_analysis(missSet=[np.nan, 9999999999, -999999], df=None): # 种类计数 count_un = df.apply(lambda x: len(x.unique())) count_un = count_un.to_frame('count') # 转成dataframe # 零的计数 count_zero = df.apply(lambda x: np.sum(x == 0)) count_zero = count_zero.to_frame('count_zero') # 均值 df_mean = df.apply(lambda x: np.mean(x[~np.isin(x, missSet)])) df_mean = df_mean.to_frame('mean') # 中位数 df_median = df.apply(lambda x: np.median(x[~np.isin(x, missSet)])) df_median = df_median.to_frame('median') # 众数 df_mode = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[0][0]) df_mode = df_mode.to_frame('mode') # 众数的频数和比例 df_mode_count = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[1][0]) df_mode_count = df_mode_count.to_frame('mode_count') df_mode_perct = df_mode_count / df.shape[0] df_mode_perct.columns = ['mode_perct'] # 最小值 df_min = df.apply(lambda x: np.min(x[~np.isin(x, missSet)])) df_min = df_min.to_frame('min') # 最大值 df_max = df.apply(lambda x: np.min(x[~np.isin(x, missSet)])) df_max = df_max.to_frame('max') # 分位数 json_quantile = {} for i, name in enumerate(df.columns): json_quantile[name] = np.percentile(df[name][~np.isin(df[name], missSet)], (1, 5, 25, 50, 75, 95, 99)) df_quantile = pd.DataFrame(json_quantile)[df.columns].T df_quantile.columns = ['quan01', 'quan05', 'quan25', 'quan50', 'quan75', 'quan95', 'quan99'] # 频数(产出为数组,不能使用apply) json_fre_name = {} json_fre_count = {} for i, name in enumerate(df.columns): # 频数前五的数 index_name = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].index.values index_name = fill_fre_top_5(index_name) json_fre_name[name] = index_name # 频数前五的数的频数 value_count = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].values value_count = 
fill_fre_top_5(value_count) json_fre_count[name] = value_count df_fre_name = pd.DataFrame(json_fre_name)[df.columns].T df_fre_count = pd.DataFrame(json_fre_count)[df.columns].T df_fre = pd.concat([df_fre_name, df_fre_count], axis=1) # 合并 df_fre.columns = ['Value1', 'Value2', 'Value3', 'Value4', 'Value5', 'freq1', 'freq2', 'freq3', 'freq4', 'freq5'] # 缺失值 df_miss = df.apply(lambda x: np.sum(np.isin(x, missSet))) df_miss = df_miss.to_frame('freq_miss') # 合并所有统计指标 df_eda_summary = pd.concat( [count_un, count_zero, df_mean, df_median, df_mode, df_mode_count, df_mode_perct, df_min, df.max, df_fre, df_miss], axis=1) return df_eda_summary
Tool integration
1. Core integration code:
# 12.Combine All Information
df_eda_summary = pd.concat(
[count_un, count_zero, df_mean, df_median, df_mode,
df_mode_count, df_mode_perct, df_min, df_max, df_fre,
df_miss], axis=1
)
# Rows are the features; one summary column is appended per statistic computed above.
# -*- coding:utf-8 from __future__ import division #除的结果是小数 import pandas as pd import numpy as np from scipy import stats def fill_fre_top_5(x): #频数前五位的函数:若不足5,则nan补齐 if (len(x)) <= 5: new_array = np.full(5, np.nan) new_array[0:len(x)] = x return new_array def eda_analysis(missSet = [np.nan, 9999999999, -999999], df=None): #种类计数 count_un = df.apply(lambda x: len(x.unique())) count_un = count_un.to_frame('count') #转成dataframe #零的计数 count_zero = df.apply(lambda x: np.sum(x==0)) count_zero = count_zero.to_frame('count_zero') #均值 df_mean = df.apply(lambda x: np.mean(x[~np.isin(x, missSet)])) df_mean = df_mean.to_frame('mean') #中位数 df_median = df.apply(lambda x: np.median(x[~np.isin(x, missSet)])) df_median = df_median.to_frame('median') #众数 df_mode = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[0][0]) df_mode = df_mode.to_frame('mode') #众数的频数和比例 df_mode_count = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[1][0]) df_mode_count = df_mode_count.to_frame('mode_count') df_mode_perct = df_mode_count / df.shape[0] df_mode_perct.columns = ['mode_perct'] #最小值 df_min = df.apply(lambda x: np.min(x[~np.isin(x, missSet)])) df_min = df_min.to_frame('min') #最大值 df_max = df.apply(lambda x: np.min(x[~np.isin(x, missSet)])) df_max = df_max.to_frame('max') #分位数 json_quantile = {} for i, name in enumerate(df.columns): json_quantile[name] = np.percentile(df[name][~np.isin(df[name], missSet)], (1, 5, 25, 50, 75, 95, 99)) df_quantile = pd.DataFrame(json_quantile)[df.columns].T df_quantile.columns = ['quan01', 'quan05', 'quan25', 'quan50', 'quan75', 'quan95', 'quan99'] #频数(产出为数组,不能使用apply) json_fre_name = {} json_fre_count = {} for i, name in enumerate(df.columns): #频数前五的数 index_name = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].index.values index_name = fill_fre_top_5(index_name) json_fre_name[name] = index_name #频数前五的数的频数 value_count = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].values value_count = fill_fre_top_5(value_count) 
json_fre_count[name] = value_count df_fre_name = pd.DataFrame(json_fre_name)[df.columns].T df_fre_count = pd.DataFrame(json_fre_count)[df.columns].T df_fre = pd.concat([df_fre_name, df_fre_count], axis=1) #合并 df_fre.columns = ['Value1', 'Value2', 'Value3', 'Value4', 'Value5', 'freq1', 'freq2', 'freq3', 'freq4', 'freq5'] #缺失值 df_miss = df.apply(lambda x: np.sum(np.isin(x, missSet))) df_miss = df_miss.to_frame('freq_miss') #合并所有统计指标 df_eda_summary = pd.concat([count_un, count_zero, df_mean, df_median, df_mode, df_mode_count, df_mode_perct, df_min, df.max, df_fre, df_miss], axis=1) return df_eda_summary
Calling the module above:
import wrap_un import * df = pd.read_csv(".csv") label = df['TARGET'] df = df.drop(['ID', 'TARGET'], axis=1) df_eda_summary = eda_analysis(missSet=[np.nan, 9999999999, -999999], df=df.iloc[:, 0:3])
Timing test:
start = timeit.default_timer() df_eda_summary = eda_analysis(missSet=[np.nan, 9999999999, -999999], df=df.iloc[:, 0:3]) print "EDA Running Time: {0:.2f} seconds".format(timeit.default_timer()-start)
'''
Created on 2018年7月16日
@author: Administrator
'''
#-*-coding:utf-8-*-
from __future__ import division
import pandas as pd
import numpy as np
from scipy import stats
from datetime import datetime
def fill_fre_top_5(x):
    """Pad an array of top-frequency values to length 5, filling with NaN.

    x: 1-D array-like of numeric values (string/object values would fail
    the float assignment below -- TODO confirm columns are numeric).
    Returns a length-5 float ndarray.
    """
    x = np.asarray(x)
    if len(x) <= 5:
        new_array = np.full(5, np.nan)
        new_array[0:len(x)] = x
        return new_array
    # BUG FIX: the original fell through and returned None when len(x) > 5;
    # truncate instead so the function always returns a length-5 array.
    return x[:5]
def eda_analysis(missSet=[np.nan, 9999999999, -999999], df=None):
    """Build a per-column EDA summary for *df*.

    Args:
        missSet: sentinel values treated as missing.  NOTE(review):
            np.isin never matches np.nan (NaN != NaN), so the NaN entry
            is effectively ignored -- TODO confirm this is intended.
        df: input DataFrame; numeric columns assumed.

    Returns:
        DataFrame indexed by column name with distinct/zero counts, mean,
        median, mode (+frequency/proportion), min, max, quantiles, top-5
        frequent values, and missing-sentinel counts.
    """
    ## 1. distinct-value count
    count_un = df.apply(lambda x: len(x.unique()))
    count_un = count_un.to_frame('count')
    ## 2. zero count
    count_zero = df.apply(lambda x: np.sum(x == 0))
    # BUG FIX: the original assigned this frame back to count_un,
    # clobbering the distinct-value counts computed above.
    count_zero = count_zero.to_frame('count_zero')
    ## 3. mean (missing-value sentinels excluded)
    df_mean = df.apply(lambda x: np.mean(x[~np.isin(x, missSet)]))
    df_mean = df_mean.to_frame('mean')
    ## 4. median
    df_median = df.apply(lambda x: np.median(x[~np.isin(x, missSet)]))
    df_median = df_median.to_frame('median')
    ## 5. mode
    # NOTE(review): stats.mode(...)[0][0] relies on pre-SciPy-1.11 output
    # shape; on newer SciPy pass keepdims=True -- confirm target version.
    df_mode = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[0][0])
    df_mode = df_mode.to_frame('mode')
    ## 6. mode frequency and proportion of all rows
    df_mode_count = df.apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[1][0])
    df_mode_count = df_mode_count.to_frame('mode_count')
    df_mode_perct = df_mode_count / df.shape[0]
    df_mode_perct.columns = ['mode_perct']
    ## 7. min
    df_min = df.apply(lambda x: np.min(x[~np.isin(x, missSet)]))
    df_min = df_min.to_frame('min')
    ## 8. max
    df_max = df.apply(lambda x: np.max(x[~np.isin(x, missSet)]))
    df_max = df_max.to_frame('max')
    ## 9. quantiles (array result per column, so a loop instead of apply)
    json_quantile = {}
    for name in df.columns:
        json_quantile[name] = np.percentile(
            df[name][~np.isin(df[name], missSet)], (1, 5, 25, 50, 75, 95, 99))
    df_quantile = pd.DataFrame(json_quantile)[df.columns].T
    df_quantile.columns = ['quan01', 'quan05', 'quan25', 'quan50',
                           'quan75', 'quan95', 'quan99']
    ## 10. top-5 most frequent values and their counts, NaN-padded to 5
    json_fre_name = {}
    json_fre_count = {}
    for name in df.columns:
        vc = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5]
        # BUG FIX: the original never stored the index values, leaving
        # json_fre_name empty and crashing the DataFrame selection below.
        json_fre_name[name] = fill_fre_top_5(vc.index.values)
        json_fre_count[name] = fill_fre_top_5(vc.values)
    df_fre_name = pd.DataFrame(json_fre_name)[df.columns].T
    df_fre_count = pd.DataFrame(json_fre_count)[df.columns].T
    df_fre = pd.concat([df_fre_name, df_fre_count], axis=1)
    df_fre.columns = ['value1', 'value2', 'value3', 'value4', 'value5',
                      'freq1', 'freq2', 'freq3', 'freq4', 'freq5']
    ## 11. missing-sentinel count
    df_miss = df.apply(lambda x: np.sum(np.isin(x, missSet)))
    df_miss = df_miss.to_frame('freq_miss')
    ## 12. combine all statistics
    # BUG FIX: df_quantile was computed above but omitted from the concat.
    df_eda_summary = pd.concat(
        [count_un, count_zero, df_mean, df_median,
         df_mode, df_mode_count, df_mode_perct,
         df_min, df_max, df_quantile, df_fre, df_miss], axis=1)
    return df_eda_summary
'''
Created on 2018年7月10日
Kaggle网站公开的数据
@author: Administrator
'''
#-*-coding:utf-8-*-
from __future__ import division
import pandas as pd
import numpy as np
from scipy import stats
## 0. Read data (Kaggle Santander customer-satisfaction training set)
df = pd.read_csv("train.csv")
label = df['TARGET']
df = df.drop(['ID', 'TARGET'], axis=1)
## 1. Basic analysis
# (1) Missing-value sentinels.  NOTE(review): np.isin never matches
# np.nan, so the NaN entry is effectively ignored -- TODO confirm.
missSet = [np.nan, 9999999999, -999999]
# (2) Distinct count: single-column probe, then the first three columns
len(df.iloc[:, 0].unique())
count_un = df.iloc[:, 0:3].apply(lambda x: len(x.unique()))
# (3) Zero counts
np.sum(df.iloc[:, 0] == 0)
count_zero = df.iloc[:, 0:3].apply(lambda x: np.sum(x == 0))
# (4) Mean: first raw (skewed by sentinels), then with sentinels removed
np.mean(df.iloc[:, 0])
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
np.mean(df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)])
df_mean = df.iloc[:, 0:3].apply(lambda x: np.mean(x[~np.isin(x, missSet)]))
# (5) Median, same pattern
np.median(df.iloc[:, 0])
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
np.median(df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)])
df_median = df.iloc[:, 0:3].apply(lambda x: np.median(x[~np.isin(x, missSet)]))
# (6) Mode.  NOTE(review): [0][0] indexing assumes pre-SciPy-1.11 output
# shape; pass keepdims=True on newer SciPy -- confirm target version.
df_mode = df.iloc[:, 0:3].apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[0][0])
# (7) Mode percentage over all rows
df_mode_count = df.iloc[:, 0:3].apply(lambda x: stats.mode(x[~np.isin(x, missSet)])[1][0])
df_mode_perct = df_mode_count / df.shape[0]
# (8) Min, raw then sentinel-free
np.min(df.iloc[:, 0])
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
np.min(df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)])
df_min = df.iloc[:, 0:3].apply(lambda x: np.min(x[~np.isin(x, missSet)]))
# (9) Max, raw then sentinel-free
np.max(df.iloc[:, 0])
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
np.max(df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)])
df_max = df.iloc[:, 0:3].apply(lambda x: np.max(x[~np.isin(x, missSet)]))
# (10) Quantiles (array result per column, so a loop instead of apply)
np.percentile(df.iloc[:, 0], (1, 5, 25, 50, 75, 95, 99))
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
np.percentile(df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)], (1, 5, 25, 50, 75, 95, 99))
json_quantile = {}
for i, name in enumerate(df.iloc[:, 0:3].columns):
    # BUG FIX: the original called .format() on print's return value (None)
    print('the {} columns:{}'.format(i, name))
    json_quantile[name] = np.percentile(df[name][~np.isin(df[name], missSet)], (1, 5, 25, 50, 75, 95, 99))
df_quantile = pd.DataFrame(json_quantile)[df.iloc[:, 0:3].columns].T
# (11) Frequent values: raw probe, then sentinel-free top 5
df.iloc[:, 0].value_counts().iloc[0:5, ]
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)]
df.iloc[:, 0][~np.isin(df.iloc[:, 0], missSet)].value_counts()[0:5]
# BUG FIX: the original set json_fre_name = 0; it must be a dict,
# because the frequency loop below stores per-column arrays in it.
json_fre_name = {}
json_fre_count = {}
def fill_fre_top_5(x):
    """NaN-pad (or truncate) a top-frequency value array to length 5.

    x: 1-D array-like of numeric values (object/string values would fail
    the float assignment -- TODO confirm the probed columns are numeric).
    Returns a length-5 float ndarray.
    """
    x = np.asarray(x)
    if len(x) <= 5:
        new_array = np.full(5, np.nan)
        new_array[0:len(x)] = x
        return new_array
    # BUG FIX: the original returned None when len(x) > 5; truncating
    # keeps the "always length 5" contract callers rely on.
    return x[:5]
# Probe the two example columns' value distributions.
df['ind_var1_0'].value_counts()
# NOTE(review): 'imp_sal_var16_ultl' may be a typo for 'imp_sal_var16_ult1'
# (Santander columns end in 'ult1') -- confirm against train.csv.
df['imp_sal_var16_ultl'].value_counts()
for i, name in enumerate(df[['ind_var1_0', 'imp_sal_var16_ultl']].columns):
    ## 1. top-5 value names (sentinels removed), NaN-padded to length 5
    index_name = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].index.values
    index_name = fill_fre_top_5(index_name)
    # BUG FIX: the original never stored index_name, so json_fre_name
    # stayed empty and the DataFrame selection below raised KeyError.
    json_fre_name[name] = index_name
    ## 2. top-5 value counts, NaN-padded to length 5
    values_count = df[name][~np.isin(df[name], missSet)].value_counts().iloc[0:5, ].values
    values_count = fill_fre_top_5(values_count)
    json_fre_count[name] = values_count
df_fre_name = pd.DataFrame(json_fre_name)[df[['ind_var1_0', 'imp_sal_var16_ultl']].columns].T
df_fre_count = pd.DataFrame(json_fre_count)[df[['ind_var1_0', 'imp_sal_var16_ultl']].columns].T
df_fre = pd.concat([df_fre_name, df_fre_count], axis=1)
# (12) Missing-sentinel counts: single-column probe, then first 3 columns
np.sum(np.isin(df.iloc[:, 0], missSet))
df_miss = df.iloc[:, 0:3].apply(lambda x: np.sum(np.isin(x, missSet)))