import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
import sklearn
import xgboost as xgb
# sklearn
from sklearn import model_selection # split함수이용
from sklearn import ensemble # RF,GBM
from sklearn import metrics
from sklearn.metrics import precision_score, recall_score, f1_score, roc_curve, auc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
# gnn
import torch
import torch.nn.functional as F
import torch_geometric
from torch_geometric.nn import GCNConv
imports
def down_sample_textbook(df):
    """Balance the classes by down-sampling the majority (non-fraud) rows.

    Draws, without replacement, exactly as many is_fraud==0 rows as there
    are is_fraud==1 rows (random_state=42 for reproducibility) and returns
    the fraud rows concatenated with that sample.
    """
    fraud = df[df.is_fraud == 1].copy()
    legit = df[df.is_fraud == 0].copy()
    legit_sampled = sklearn.utils.resample(
        legit, n_samples=len(fraud), replace=False, random_state=42
    )
    return pd.concat([fraud, legit_sampled])
def compute_time_difference(group):
    """Return every ordered pair of rows in `group` with their absolute
    transaction-time difference.

    Parameters
    ----------
    group : pd.DataFrame
        Rows for a single card; `trans_date_trans_time` must be a
        datetime64[ns] column, so differences are in nanoseconds.

    Returns
    -------
    list[list]
        [index_i, index_j, |t_i - t_j|] for all pairs (i, j),
        including i == j (difference 0), matching the original output.
    """
    # Hoist the column and index out of the O(n^2) loop: the original
    # called .iloc twice per inner iteration, which dominates runtime.
    times = group.trans_date_trans_time.to_numpy().astype('int64')
    labels = group.index.tolist()
    return [
        [label_i, label_j, abs(int(t_i) - int(t_j))]
        for label_i, t_i in zip(labels, times)
        for label_j, t_j in zip(labels, times)
    ]
def mask(df):
    """Split `df` into train/test and return position-aligned boolean masks.

    NOTE(review): assumes df's index is the RangeIndex 0..N-1 (upstream
    code calls reset_index() before this) — positions whose label is not
    in either split would be False in both masks. TODO confirm.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Boolean arrays of length len(df): train_mask, test_mask.
    """
    df_tr, df_test = sklearn.model_selection.train_test_split(df, random_state=42)
    n = len(df)
    # Sets give O(1) membership; the original scanned the whole Index for
    # every position (O(N^2) total).
    tr_labels = set(df_tr.index)
    te_labels = set(df_test.index)
    train_mask = np.array([i in tr_labels for i in range(n)])
    test_mask = np.array([i in te_labels for i in range(n)])
    return train_mask, test_mask
def edge_index_selected(edge_index):
    """Filter a raw [src, dst, dt] edge array into a COO edge_index tensor.

    Each edge gets weight exp(-dt/theta) with theta = mean(dt); weights
    exactly equal to 1 (i.e. dt == 0, a row paired with itself) are zeroed
    out. Only edges whose weight exceeds the mean weight are kept.

    Parameters
    ----------
    edge_index : array-like of shape (E, 3)
        Columns: source index, target index, non-negative time difference.

    Returns
    -------
    torch.Tensor
        Long tensor of shape (2, E_selected) in COO (src-row, dst-row) form.

    Fixes: the original mutated the caller's array in place and computed
    np.exp twice; this version works on a copy and computes it once.
    """
    edges = np.asarray(edge_index, dtype=np.float64).copy()
    theta = edges[:, 2].mean()
    weights = np.exp(-edges[:, 2] / theta)
    weights[weights == 1] = 0.0  # drop self-pairs (dt == 0)
    keep = weights > weights.mean()
    selected = edges[keep, :2].astype(np.int64)
    return torch.tensor(selected, dtype=torch.long).t()
# Load the raw transaction data and parse the timestamp column.
# (The notebook export had fused these statements onto one line.)
file_url = 'https://media.githubusercontent.com/media/musthave-ML10/data_source/main/fraud.csv'
fraudTrain = pd.read_csv(file_url)
# Local alternative: fraudTrain = pd.read_csv("~/Desktop/fraudTrain.csv").iloc[:, 1:]
fraudTrain = fraudTrain.assign(
    trans_date_trans_time=list(map(lambda x: pd.to_datetime(x), fraudTrain.trans_date_trans_time))
)
fraudTrain
| trans_date_trans_time | cc_num | merchant | category | amt | first | last | gender | street | city | ... | lat | long | city_pop | job | dob | trans_num | unix_time | merch_lat | merch_long | is_fraud | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 2019-01-01 00:00:18 | 2703186189652095 | fraud_Rippin, Kub and Mann | misc_net | 4.97 | Jennifer | Banks | F | 561 Perry Cove | Moravian Falls | ... | 36.0788 | -81.1781 | 3495 | Psychologist, counselling | 1988-03-09 | 0b242abb623afc578575680df30655b9 | 1325376018 | 36.011293 | -82.048315 | 0 |
| 1 | 2019-01-01 00:00:44 | 630423337322 | fraud_Heller, Gutmann and Zieme | grocery_pos | 107.23 | Stephanie | Gill | F | 43039 Riley Greens Suite 393 | Orient | ... | 48.8878 | -118.2105 | 149 | Special educational needs teacher | 1978-06-21 | 1f76529f8574734946361c461b024d99 | 1325376044 | 49.159047 | -118.186462 | 0 |
| 2 | 2019-01-01 00:00:51 | 38859492057661 | fraud_Lind-Buckridge | entertainment | 220.11 | Edward | Sanchez | M | 594 White Dale Suite 530 | Malad City | ... | 42.1808 | -112.2620 | 4154 | Nature conservation officer | 1962-01-19 | a1a22d70485983eac12b5b88dad1cf95 | 1325376051 | 43.150704 | -112.154481 | 0 |
| 3 | 2019-01-01 00:01:16 | 3534093764340240 | fraud_Kutch, Hermiston and Farrell | gas_transport | 45.00 | Jeremy | White | M | 9443 Cynthia Court Apt. 038 | Boulder | ... | 46.2306 | -112.1138 | 1939 | Patent attorney | 1967-01-12 | 6b849c168bdad6f867558c3793159a81 | 1325376076 | 47.034331 | -112.561071 | 0 |
| 4 | 2019-01-01 00:03:06 | 375534208663984 | fraud_Keeling-Crist | misc_pos | 41.96 | Tyler | Garcia | M | 408 Bradley Rest | Doe Hill | ... | 38.4207 | -79.4629 | 99 | Dance movement psychotherapist | 1986-03-28 | a41d7549acf90789359a9aa5346dcb46 | 1325376186 | 38.674999 | -78.632459 | 0 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 1852389 | 2020-12-31 23:59:07 | 30560609640617 | fraud_Reilly and Sons | health_fitness | 43.77 | Michael | Olson | M | 558 Michael Estates | Luray | ... | 40.4931 | -91.8912 | 519 | Town planner | 1966-02-13 | 9b1f753c79894c9f4b71f04581835ada | 1388534347 | 39.946837 | -91.333331 | 0 |
| 1852390 | 2020-12-31 23:59:09 | 3556613125071656 | fraud_Hoppe-Parisian | kids_pets | 111.84 | Jose | Vasquez | M | 572 Davis Mountains | Lake Jackson | ... | 29.0393 | -95.4401 | 28739 | Futures trader | 1999-12-27 | 2090647dac2c89a1d86c514c427f5b91 | 1388534349 | 29.661049 | -96.186633 | 0 |
| 1852391 | 2020-12-31 23:59:15 | 6011724471098086 | fraud_Rau-Robel | kids_pets | 86.88 | Ann | Lawson | F | 144 Evans Islands Apt. 683 | Burbank | ... | 46.1966 | -118.9017 | 3684 | Musician | 1981-11-29 | 6c5b7c8add471975aa0fec023b2e8408 | 1388534355 | 46.658340 | -119.715054 | 0 |
| 1852392 | 2020-12-31 23:59:24 | 4079773899158 | fraud_Breitenberg LLC | travel | 7.99 | Eric | Preston | M | 7020 Doyle Stream Apt. 951 | Mesa | ... | 44.6255 | -116.4493 | 129 | Cartographer | 1965-12-15 | 14392d723bb7737606b2700ac791b7aa | 1388534364 | 44.470525 | -117.080888 | 0 |
| 1852393 | 2020-12-31 23:59:34 | 4170689372027579 | fraud_Dare-Marvin | entertainment | 38.13 | Samuel | Frey | M | 830 Myers Plaza Apt. 384 | Edmond | ... | 35.6665 | -97.4798 | 116001 | Media buyer | 1993-05-10 | 1765bb45b3aa3224b4cdcb6e7a96cee3 | 1388534374 | 36.210097 | -97.036372 | 0 |
1852394 rows × 22 columns
street/state/zip: 고객 거주지 정보
lat/long: 고객 주소에 대한 위도 및 경도
city_pop: 고객의 zipcode에 속하는 인구 수
job: 직업
dob: 생년월일
trans_num: 거래번호
unix_time: 거래 시간(유닉스 타임 스탬프 형식)
데이터정리
# --- Data preparation ---------------------------------------------------
# Keep 20% of legitimate transactions plus every fraud case, then
# down-sample to a balanced 50/50 set.
_df1 = fraudTrain[fraudTrain["is_fraud"] == 0].sample(frac=0.20, random_state=42)
_df2 = fraudTrain[fraudTrain["is_fraud"] == 1]
df02 = pd.concat([_df1, _df2])      # observed shape: (378200, 22)
df50 = down_sample_textbook(df02)
df50 = df50.reset_index()           # observed shape: (19302, 23)

# Per-card amount mean/std -> z-score feature for each transaction.
amt_info = df50.groupby('cc_num').agg(['mean', 'std'])['amt'].reset_index()
df50 = df50.merge(amt_info, on='cc_num', how='left')
df50['amt_z_score'] = (df50['amt'] - df50['mean']) / df50['std']
df50.drop(['mean', 'std'], axis=1, inplace=True)

# --- Train/test split ---------------------------------------------------
train_mask, test_mask = mask(df50)
df50_tr, df50_test = sklearn.model_selection.train_test_split(df50, random_state=42)

# --- edge_index construction --------------------------------------------
# Pairwise transaction-time differences within each card number.
groups = df50.groupby('cc_num')
edge_index_list_plus = [compute_time_difference(group) for _, group in groups]
edge_index_list_plus_flat = [item for sublist in edge_index_list_plus for item in sublist]
edge_index_list_plus_nparr = np.array(edge_index_list_plus_flat)
np.save('edge_index_list_plus50_2.npy', edge_index_list_plus_nparr)
edge_index = np.load('edge_index_list_plus50_2.npy').astype(np.float64)  # observed: (429162, 3)

# Renamed from `edge_index_selected` so the helper function is not shadowed.
selected_edge_index = edge_index_selected(edge_index)

# --- Assemble the PyG Data object (x, edge_index, y, masks) -------------
x = torch.tensor(df50['amt_z_score'], dtype=torch.float).reshape(-1, 1)
y = torch.tensor(df50['is_fraud'], dtype=torch.int64)
data = torch_geometric.data.Data(x=x, edge_index=selected_edge_index, y=y,
                                 train_mask=train_mask, test_mask=test_mask)
# observed: Data(x=[19302, 1], edge_index=[2, 195056], y=[19302],
#                train_mask=[19302], test_mask=[19302])
분석 1(GCN)
# Fix the RNG seed so GCN weight initialization and dropout are reproducible.
torch.manual_seed(202250926)
class GCN1(torch.nn.Module):
    """Two-layer GCN: 1 input feature -> 32 hidden units -> 2 class logits.

    forward() takes a torch_geometric Data object (uses .x and .edge_index)
    and returns per-node log-softmax scores of shape (N, 2).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = GCNConv(1, 32)
        self.conv2 = GCNConv(32, 2)

    def forward(self, data):
        hidden = F.relu(self.conv1(data.x, data.edge_index))
        # Dropout is active only in train mode (self.training).
        hidden = F.dropout(hidden, training=self.training)
        logits = self.conv2(hidden, data.edge_index)
        return F.log_softmax(logits, dim=1)
# --- Train the GCN and score it on the held-out nodes -------------------
# (Removed unused X/XX/y numpy copies from the original; renamed the
# metric list so it no longer shadows the `sklearn.metrics` module
# imported at the top of the file.)
model = GCN1()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
model.train()
for epoch in range(400):
    optimizer.zero_grad()
    out = model(data)
    # NLL on training nodes only; model outputs log-softmax.
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()

model.eval()
with torch.no_grad():  # inference only — no need to track gradients
    pred = model(data).argmax(dim=1)
yy = (data.y[data.test_mask]).numpy()
yyhat = pred[data.test_mask]

metric_fns = [sklearn.metrics.accuracy_score,
              sklearn.metrics.precision_score,
              sklearn.metrics.recall_score,
              sklearn.metrics.f1_score]
_results1 = pd.DataFrame({m.__name__: [m(yy, yyhat).round(6)] for m in metric_fns},
                         index=['분석1'])
_results1| accuracy_score | precision_score | recall_score | f1_score | |
|---|---|---|---|---|
| 분석1 | 0.897431 | 0.832287 | 0.994169 | 0.906054 |