使用逻辑回归构建信用评分卡全流程 Python 脚本

本脚本演示了如何使用逻辑回归构建信用评分卡,涵盖了数据预处理、特征工程、模型训练、评分卡转换和稳定性评估等步骤,并详细解释了每个步骤的代码和原理。

1. 导入包

我们需要导入以下包:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score

2. 导入数据

我们需要导入数据集,并查看数据的基本信息,例如数据维度、数据类型、缺失值情况等。

# Load the raw dataset and print a quick profile: dimensions, column
# dtypes, and per-column missing-value counts.
data = pd.read_csv('data.csv')
for overview in (data.shape, data.dtypes, data.isnull().sum()):
    print(overview)

3. EDA

我们需要进行探索性数据分析 (EDA),以了解数据的分布、关系和异常值。我们可以使用散点图、箱线图、直方图和密度图等可视化工具来展示数据的分布情况。

# EDA: one figure per chart so the plots do not draw over each other.
plt.figure()
sns.scatterplot(x='age', y='default', data=data)
plt.figure()
sns.boxplot(x='default', y='income', data=data)
# distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot(..., kde=True) is the supported replacement.
plt.figure()
sns.histplot(data['age'], kde=True)
plt.figure()
sns.kdeplot(data['income'])
plt.show()

4. 数据分箱

为了将连续的变量转换为离散的变量,我们需要对变量进行分箱。常用的分箱方法有等频分箱、等距分箱和最优分箱 (基于卡方值或最小熵) 等。

# Equal-frequency binning (deciles; duplicate edges dropped).  Stored under
# their own names — the original wrote these into 'age_bin'/'income_bin' and
# then immediately clobbered them with the equal-width results below, so the
# quantile bins were silently discarded.
data['age_bin_freq'] = pd.qcut(data['age'], q=10, duplicates='drop')
data['income_bin_freq'] = pd.qcut(data['income'], q=10, duplicates='drop')

# Equal-width binning; downstream steps keep using 'age_bin'/'income_bin'.
data['age_bin'] = pd.cut(data['age'], bins=range(0, 101, 10))
data['income_bin'] = pd.cut(data['income'], bins=range(0, 100001, 10000))

# 最优分箱
from scipy import stats
def chi_merge(df, var, target, max_bins=10):
    """Bottom-up ChiMerge supervised binning.

    Aggregates `df` into one row per distinct value of `var` (bad = sum of the
    binary `target`, cnt = row count), then repeatedly merges the pair of
    adjacent bins with the smallest chi-square statistic until at most
    `max_bins` bins remain.

    The previous implementation merged on Information Value rather than
    chi-square, and when the minimum-IV bin sat at either edge it only
    adjusted the bin's representative value without merging counts or
    recomputing IV — so the loop condition never changed and it could spin
    forever.

    Parameters
    ----------
    df : pandas.DataFrame   source data.
    var : str               column to bin (values must be sortable).
    target : str            binary 0/1 target column.
    max_bins : int          maximum number of bins to keep (default 10);
                            new parameter with a default, so existing calls
                            are unaffected.

    Returns
    -------
    pandas.DataFrame with one row per final bin and the same columns the
    original produced: [var, 'bad', 'cnt', 'good', 'bad_rate', 'woe', 'iv'].
    `var` holds the left (smallest) original value of each merged bin.
    Bins with zero goods or bads yield +/-inf WoE.
    """
    binned = df[[var, target]].copy()
    binned['cnt'] = 1
    grouped = binned.groupby(var).agg({target: 'sum', 'cnt': 'count'}).reset_index()
    grouped.columns = [var, 'bad', 'cnt']
    grouped['good'] = grouped['cnt'] - grouped['bad']
    grouped = grouped.sort_values(by=var).reset_index(drop=True)

    def _pair_chi2(i):
        # Chi-square statistic of the 2x2 [bad, good] table for bins i, i+1.
        obs = grouped.loc[i:i + 1, ['bad', 'good']].to_numpy(dtype=float)
        total = obs.sum()
        if total == 0:
            return 0.0
        expected = np.outer(obs.sum(axis=1), obs.sum(axis=0)) / total
        mask = expected > 0
        return float((((obs - expected) ** 2)[mask] / expected[mask]).sum())

    # Merge the least-distinguishable adjacent pair until max_bins remain.
    while len(grouped) > max_bins:
        chi2s = [_pair_chi2(i) for i in range(len(grouped) - 1)]
        i = int(np.argmin(chi2s))
        for col in ('bad', 'good', 'cnt'):
            grouped.loc[i, col] += grouped.loc[i + 1, col]
        grouped = grouped.drop(i + 1).reset_index(drop=True)

    grouped['bad_rate'] = grouped['bad'] / grouped['cnt']
    grouped['woe'] = np.log((grouped['bad'] / grouped['bad'].sum()) /
                            (grouped['good'] / grouped['good'].sum()))
    grouped['iv'] = (grouped['bad'] / grouped['bad'].sum() -
                     grouped['good'] / grouped['good'].sum()) * grouped['woe']
    return grouped

# NOTE(review): chi_merge returns ONE ROW PER BIN, but these assignments
# write that short aggregate back as a full-length column.  pandas aligns on
# the index, so only the first few rows receive values (the bins'
# representative values, not each row's own bin) and the rest become NaN.
# A per-row mapping of each raw value to its merged bin (e.g. pd.cut with
# the merged boundaries) is almost certainly what was intended — confirm.
data['age_bin'] = chi_merge(data, 'age', 'default')['age']
data['income_bin'] = chi_merge(data, 'income', 'default')['income']

5. 特征筛选

我们需要选择与目标变量相关性较高的特征。常用的特征选择方法有方差分析 (ANOVA)、卡方检验、互信息和 L1 正则化等。

# 方差分析 (ANOVA)
from scipy.stats import f_oneway
# One-way ANOVA of each binned feature across the two default classes;
# prints the p-value per feature (small p => the feature separates classes).
# NOTE(review): after pd.cut these columns hold Interval categories, while
# f_oneway expects numeric samples — confirm the bins are numeric here.
for feature in ('age_bin', 'income_bin'):
    stat, pvalue = f_oneway(data[feature][data['default'] == 0],
                            data[feature][data['default'] == 1])
    print(feature, pvalue)

# 卡方检验
from scipy.stats import chi2_contingency
# Chi-square test of independence between each categorical feature and the
# default flag; a small p-value marks the feature as informative.
for feature in ('gender', 'education', 'marriage'):
    observed = pd.crosstab(data[feature], data['default'])
    stat, pvalue, dof, expected = chi2_contingency(observed)
    print(feature, pvalue)

# 互信息
from sklearn.feature_selection import mutual_info_classif
# Mutual information between each candidate feature and the target —
# captures non-linear dependence as well (higher = more informative).
X = data[['age_bin', 'income_bin', 'gender', 'education', 'marriage']]
y = data['default']
mutual_info = mutual_info_classif(X, y)
for feature, mi in zip(X.columns, mutual_info):
    print(feature, mi)

# L1 正则化
# Embedded feature selection via L1-regularized logistic regression:
# coefficients shrunk exactly to zero mark features the model discards.
X = pd.get_dummies(data[['age_bin', 'income_bin', 'gender', 'education', 'marriage']])
y = data['default']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
model = LogisticRegression(penalty='l1', solver='liblinear')
model.fit(X_train, y_train)
coef = model.coef_[0]
for feature, weight in zip(X.columns, coef):
    print(feature, weight)

6. WoE 转换

我们需要将特征值转换为 WoE (Weight of Evidence),以便于后续建模和评分卡制作。同时,我们需要计算 IV (Information Value) 来评估每个特征对目标变量的影响程度。

def calc_woe(df, var, target):
    """Weight of Evidence per distinct value of `var`.

    For each value of `var`, WoE = ln(share of all goods / share of all
    bads) and IV = (good share - bad share) * WoE.  Note: despite their
    names, 'bad_rate'/'good_rate' here are each value's share of the total
    bads/goods, not within-bin rates; values with zero goods or bads yield
    +/-inf WoE.

    Parameters
    ----------
    df : pandas.DataFrame   source data.
    var : str               feature column (binned or categorical).
    target : str            binary 0/1 target column.

    Returns
    -------
    pandas.DataFrame with columns [var, 'woe', 'iv'], one row per value.
    """
    counts = df[[var, target]].copy()
    counts['cnt'] = 1
    grouped = counts.groupby(var).agg({target: 'sum', 'cnt': 'count'}).reset_index()
    grouped.columns = [var, 'bad', 'cnt']
    grouped['good'] = grouped['cnt'] - grouped['bad']
    grouped['bad_rate'] = grouped['bad'] / grouped['bad'].sum()
    grouped['good_rate'] = grouped['good'] / grouped['good'].sum()
    grouped['woe'] = np.log(grouped['good_rate'] / grouped['bad_rate'])
    grouped['iv'] = (grouped['good_rate'] - grouped['bad_rate']) * grouped['woe']
    # Bug fix: the original returned df2[['var', 'woe', 'iv']] — the literal
    # string 'var' is not a column, so every call raised KeyError.
    return grouped[[var, 'woe', 'iv']]

def calc_iv(df, var, target):
    """Total Information Value of `var` against the binary `target`.

    Sums the per-value IV contributions (good share - bad share) * WoE over
    the distinct values of `var`.  The shares are each value's fraction of
    all goods/bads in `df`; values with zero goods or bads contribute
    +/-inf terms.
    """
    summary = (df[[var, target]]
               .assign(cnt=1)
               .groupby(var)
               .agg({target: 'sum', 'cnt': 'count'})
               .reset_index())
    summary.columns = [var, 'bad', 'cnt']
    summary['good'] = summary['cnt'] - summary['bad']
    bad_share = summary['bad'] / summary['bad'].sum()
    good_share = summary['good'] / summary['good'].sum()
    woe = np.log(good_share / bad_share)
    return ((good_share - bad_share) * woe).sum()

# Attach a WoE-encoded column for every feature and collect each feature's
# total Information Value.
woe_vars = []
iv_s = []
for var in ['age_bin', 'income_bin', 'gender', 'education', 'marriage']:
    woe_df = calc_woe(data, var, 'default')
    woe_var = var + '_woe'
    # Map each row's value to its WoE by value.  The original merged and
    # relied on positional index alignment, which silently misaligns rows
    # whenever `data` does not carry a clean 0..n-1 RangeIndex.
    data[woe_var] = data[var].map(woe_df.set_index(var)['woe'])
    woe_vars.append(woe_var)
    iv_s.append(calc_iv(data, var, 'default'))

iv_df = pd.DataFrame({'var': ['age_bin', 'income_bin', 'gender', 'education', 'marriage'],
                      'iv': iv_s})
print(iv_df)

7. 逻辑回归建模

我们需要使用逻辑回归模型来建立信用评分卡。我们可以使用 sklearn 库中的 LogisticRegression 类来完成逻辑回归建模。

# Fit the scorecard's logistic regression on the WoE-encoded features and
# evaluate on a 30% hold-out split.
X = data[woe_vars]
y = data['default']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
model = LogisticRegression(penalty='l2', solver='liblinear')
model.fit(X_train, y_train)

# Hard labels for the confusion matrix...
y_pred = model.predict(X_test)
cm = confusion_matrix(y_test, y_pred)

# ...but ROC/AUC must be computed from predicted probabilities, not hard
# 0/1 labels — hard labels collapse the ROC curve to a single operating
# point and understate the AUC.
y_score = model.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_score)
auc = roc_auc_score(y_test, y_score)

# Bug fix: the original string literal was split across two physical lines
# (a syntax error); '\n' is the intended newline.
print('Confusion Matrix:\n', cm)
print('AUC:', auc)

8. 评分卡转换

我们需要将模型转换为评分卡。评分卡是一种将模型分数转换为易于理解和使用的分数的方法。常用的评分卡转换方法有等宽分数、基于正态分布的分数和基于 odds 的分数等。

# Scorecard scaling: `factor` converts log-odds to points (pdo = points to
# double the odds) and `offset` anchors the base score at 1:60 odds.
base_score = 500
pdo = 20
factor = pdo / np.log(2)
offset = base_score - factor * np.log(1 / 60)

# NOTE(review): the per-bin points below use factor * WoE * coefficient
# only; the model intercept and the base score are not distributed into the
# bins — confirm this matches the intended scorecard convention.
coefficients = model.coef_[0]
scores = []
for idx, woe_feature in enumerate(woe_vars):
    source_var = woe_feature.replace('_woe', '')
    woe_table = calc_woe(data, source_var, 'default')
    for bin_value, bin_woe in dict(zip(woe_table[source_var], woe_table['woe'])).items():
        bin_points = round(factor * bin_woe * coefficients[idx], 0)
        scores.append((source_var, bin_value, bin_woe, coefficients[idx], bin_points))

scores_df = pd.DataFrame(scores, columns=['var', 'val', 'woe', 'coef', 'score'])
scores_df['score'] = scores_df['score'].astype(int)
scores_df.to_csv('scores.csv', index=False)

9. 输出每个特征分箱对应的评分

我们可以输出每个特征分箱对应的评分,以便于我们在实际应用中使用。

# Print every feature's per-bin score contribution, refreshing the WoE
# columns along the way (same factor/coefficient scaling as the scorecard).
for feature in ['age_bin', 'income_bin', 'gender', 'education', 'marriage']:
    woe_table = calc_woe(data, feature, 'default')
    woe_col = feature + '_woe'
    data[woe_col] = pd.merge(data[feature], woe_table, on=feature, how='left')['woe']
    print(feature)
    feature_coef = coefficients[woe_vars.index(woe_col)]
    for bin_value, bin_woe in dict(zip(woe_table[feature], woe_table['woe'])).items():
        print(bin_value, round(factor * bin_woe * feature_coef, 0))

10. 使用 OOT 数据验证模型稳定性,并计算 PSI

我们需要使用 out-of-time (OOT) 数据集来验证模型的稳定性,并计算 PSI (Population Stability Index) 来评估模型在不同时间段和人群群体中的稳定性。

# OOT validation: refit on the full training data and compare the score
# distributions of train vs. out-of-time samples via PSI.
oot_data = pd.read_csv('oot_data.csv')
# NOTE(review): chi_merge returns per-bin summary rows; assigning them as
# row-level columns relies on index alignment and likely needs a
# value-to-bin mapping instead — confirm before production use.
oot_data['age_bin'] = chi_merge(oot_data, 'age', 'default')['age']
oot_data['income_bin'] = chi_merge(oot_data, 'income', 'default')['income']
for var in ['age_bin', 'income_bin', 'gender', 'education', 'marriage']:
    woe_df = calc_woe(data, var, 'default')
    woe_var = var + '_woe'
    data[woe_var] = pd.merge(data[var], woe_df, on=var, how='left')['woe']
    oot_data[woe_var] = pd.merge(oot_data[var], woe_df, on=var, how='left')['woe']
X_train = data[woe_vars]
y_train = data['default']
X_oot = oot_data[woe_vars]
y_oot = oot_data['default']
model = LogisticRegression(penalty='l2', solver='liblinear')
model.fit(X_train, y_train)
y_pred_train = model.predict_proba(X_train)[:, 1]
y_pred_oot = model.predict_proba(X_oot)[:, 1]


def _calc_psi(expected, actual, n_bins=10, eps=1e-6):
    """PSI between two score samples, binned by quantiles of `expected`.

    The original elementwise formula required both arrays to be the same
    length and paired row-for-row — never true of train vs. OOT samples
    (it would raise a broadcasting error).  PSI is defined over the binned
    distributions: sum over bins of (p_expected - p_actual) *
    ln(p_expected / p_actual).
    """
    edges = np.unique(np.quantile(expected, np.linspace(0.0, 1.0, n_bins + 1)))
    if edges.size < 2:
        # Degenerate case: all expected scores identical.
        edges = np.array([-np.inf, np.inf])
    else:
        # Open the outer edges so no actual score falls outside the bins.
        edges[0], edges[-1] = -np.inf, np.inf
    expected_pct = np.histogram(expected, bins=edges)[0] / len(expected)
    actual_pct = np.histogram(actual, bins=edges)[0] / len(actual)
    # Clamp empty bins so the logarithm stays finite.
    expected_pct = np.clip(expected_pct, eps, None)
    actual_pct = np.clip(actual_pct, eps, None)
    return float(np.sum((expected_pct - actual_pct) * np.log(expected_pct / actual_pct)))


psi = _calc_psi(y_pred_train, y_pred_oot)
print('PSI:', psi)

注意:

  • 本脚本仅供参考,需要根据实际情况进行调整。
  • 请确保数据集中包含目标变量 (例如 'default') 和用于构建评分卡的特征 (例如 'age', 'income', 'gender', 'education', 'marriage')。
  • 'data.csv' 和 'oot_data.csv' 是数据集的文件名,请根据实际情况修改。
  • 评分卡的具体参数 (例如 base_score, pdo) 可以在实际应用中根据业务需求进行调整。
使用逻辑回归构建信用评分卡全流程 Python 脚本 - 包含数据预处理、特征工程、模型训练和评分卡制作

原文地址: https://www.cveoy.top/t/topic/nC77 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录