💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐
基于大数据的用户贷款行为数据分析系统介绍
用户贷款行为数据分析系统是一个基于大数据技术构建的智能化金融数据分析平台,采用Hadoop+Spark大数据框架作为核心技术架构,结合Django后端框架和Vue前端技术实现完整的数据分析解决方案。系统运用Spark SQL进行高效的数据查询和分析处理,通过Pandas和NumPy进行数据预处理和统计分析,将复杂的贷款用户行为数据转化为直观的可视化图表和分析报告。平台包含用户贷款行为数据管理、收入年龄分析、职业工作分析、模型效果分析、地理位置分析、用户画像分析等七大核心功能模块,每个模块都采用Echarts图表库实现丰富的数据可视化展示效果。系统通过HDFS分布式文件系统存储海量贷款数据,利用Spark的内存计算优势实现快速的数据分析和挖掘,为金融机构的风险评估和业务决策提供数据支撑,展现了现代大数据技术在金融领域的实际应用价值。
基于大数据的用户贷款行为数据分析系统演示视频
基于大数据的用户贷款行为数据分析系统演示图片
基于大数据的用户贷款行为数据分析系统代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, when, desc, asc
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
import pandas as pd
import numpy as np
# Module-level SparkSession shared by all analysis view functions below.
# Adaptive query execution is enabled so Spark can tune shuffle partitions
# and join strategies at runtime based on observed data sizes.
spark = SparkSession.builder.appName("UserLoanAnalysis").config("spark.sql.adaptive.enabled", "true").getOrCreate()
def income_age_analysis(request):
    """Analyze loan behavior along the age and income dimensions.

    Queries the ``user_loan_data`` table (must be registered with the shared
    ``spark`` session), then computes:
      * per-age-group income/loan aggregates,
      * per-income-bracket approval statistics,
      * Pearson correlations (age~income, income~loan_amount) via pandas,
      * default rates per heuristic risk level.

    Args:
        request: Django HTTP request object; not used in the computation,
            kept so the function can be wired directly as a view.

    Returns:
        dict: keys ``age_income_stats``, ``income_range_analysis``,
        ``correlation_metrics`` and ``risk_distribution`` (lists of Rows /
        nested dict of floats), ready for JSON serialization by the caller.
    """
    # Keep only usable rows: age present and strictly positive income.
    loan_df = spark.sql("SELECT age, income, loan_amount, loan_status FROM user_loan_data WHERE age IS NOT NULL AND income > 0")
    # Bucket borrowers into demographic age bands.
    age_groups = loan_df.withColumn(
        "age_group",
        when(col("age") < 25, "18-24")
        .when((col("age") >= 25) & (col("age") < 35), "25-34")
        .when((col("age") >= 35) & (col("age") < 45), "35-44")
        .when((col("age") >= 45) & (col("age") < 55), "45-54")
        .otherwise("55+"))
    income_stats = age_groups.groupBy("age_group").agg(
        avg("income").alias("avg_income"),
        count("*").alias("user_count"),
        sum("loan_amount").alias("total_loan"),
        avg("loan_amount").alias("avg_loan"),
        # Count of approved loans via a conditional 0/1 sum.
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("approved_count")
    ).orderBy("age_group")
    # Bucket borrowers into income brackets (thresholds in yuan/year).
    income_ranges = loan_df.withColumn(
        "income_range",
        when(col("income") < 50000, "低收入(<5万)")
        .when((col("income") >= 50000) & (col("income") < 100000), "中低收入(5-10万)")
        .when((col("income") >= 100000) & (col("income") < 200000), "中等收入(10-20万)")
        .when((col("income") >= 200000) & (col("income") < 500000), "中高收入(20-50万)")
        .otherwise("高收入(50万+)"))
    income_analysis = income_ranges.groupBy("income_range").agg(
        count("*").alias("user_count"),
        avg("age").alias("avg_age"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("approved_loans"),
        # Approval rate as a percentage of all users in the bracket.
        (sum(when(col("loan_status") == "approved", 1).otherwise(0)) * 100.0 / count("*")).alias("approval_rate")
    ).orderBy(desc("user_count"))
    # Collect the three numeric columns to the driver for pandas correlation.
    # NOTE(review): this materializes the full filtered table in driver
    # memory — acceptable only if the dataset is modest; confirm sizing.
    correlation_data = loan_df.select("age", "income", "loan_amount").toPandas()
    age_income_corr = correlation_data["age"].corr(correlation_data["income"])
    income_loan_corr = correlation_data["income"].corr(correlation_data["loan_amount"])
    # Heuristic risk labeling combining income and age thresholds.
    risk_analysis = loan_df.withColumn(
        "risk_level",
        when((col("income") < 30000) & (col("age") < 25), "高风险")
        .when((col("income") >= 30000) & (col("income") < 80000) & (col("age") >= 25) & (col("age") < 45), "中等风险")
        .otherwise("低风险"))
    risk_stats = risk_analysis.groupBy("risk_level").agg(
        count("*").alias("total_users"),
        sum(when(col("loan_status") == "default", 1).otherwise(0)).alias("default_count"),
        (sum(when(col("loan_status") == "default", 1).otherwise(0)) * 100.0 / count("*")).alias("default_rate")
    )
    result_data = {
        'age_income_stats': income_stats.collect(),
        'income_range_analysis': income_analysis.collect(),
        'correlation_metrics': {'age_income': age_income_corr, 'income_loan': income_loan_corr},
        'risk_distribution': risk_stats.collect()
    }
    return result_data
def occupation_analysis(request):
    """Analyze loan behavior by occupation and work experience.

    Queries ``user_loan_data`` through the shared ``spark`` session and
    produces per-occupation approval/default statistics, experience-band
    aggregates, high-default occupations, an income ranking, and approval
    performance for tenured (5+ years) borrowers.

    Args:
        request: Django HTTP request object; unused, kept for the view
            signature.

    Returns:
        dict: keys ``occupation_statistics``, ``experience_analysis``,
        ``high_risk_occupations``, ``income_ranking`` and
        ``stable_occupation_performance`` — each a list of collected Rows.
    """
    occupation_df = spark.sql("SELECT occupation, work_experience, income, loan_amount, loan_status, age FROM user_loan_data WHERE occupation IS NOT NULL")
    # Per-occupation counts and rates. The whole chain is parenthesized so
    # the .withColumn continuations parse as one expression.
    occupation_stats = (
        occupation_df.groupBy("occupation").agg(
            count("*").alias("total_users"),
            avg("income").alias("avg_income"),
            avg("work_experience").alias("avg_experience"),
            avg("loan_amount").alias("avg_loan_amount"),
            sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("approved_count"),
            sum(when(col("loan_status") == "rejected", 1).otherwise(0)).alias("rejected_count"),
            sum(when(col("loan_status") == "default", 1).otherwise(0)).alias("default_count")
        )
        .withColumn("approval_rate", col("approved_count") * 100.0 / col("total_users"))
        # NOTE(review): if an occupation has zero approved loans this
        # division yields NULL (Spark SQL semantics), not an error.
        .withColumn("default_rate", col("default_count") * 100.0 / col("approved_count"))
        .orderBy(desc("total_users"))
    )
    # Bucket borrowers into seniority bands by years of work experience.
    experience_groups = occupation_df.withColumn(
        "experience_level",
        when(col("work_experience") < 2, "新手(0-2年)")
        .when((col("work_experience") >= 2) & (col("work_experience") < 5), "初级(2-5年)")
        .when((col("work_experience") >= 5) & (col("work_experience") < 10), "中级(5-10年)")
        .when((col("work_experience") >= 10) & (col("work_experience") < 15), "高级(10-15年)")
        .otherwise("专家(15年+)"))
    experience_analysis = experience_groups.groupBy("experience_level").agg(
        count("*").alias("user_count"),
        avg("income").alias("avg_income"),
        avg("loan_amount").alias("avg_loan"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("approved_loans"),
        (sum(when(col("loan_status") == "approved", 1).otherwise(0)) * 100.0 / count("*")).alias("approval_rate")
    ).orderBy("experience_level")
    # Occupations ranked by how many of their borrowers defaulted.
    high_risk_occupations = occupation_df.filter(col("loan_status") == "default").groupBy("occupation").agg(
        count("*").alias("default_count"),
        avg("income").alias("avg_defaulter_income"),
        avg("work_experience").alias("avg_defaulter_experience")
    ).orderBy(desc("default_count"))
    # Income ranking, restricted to occupations with a minimum sample size.
    occupation_income_relation = occupation_df.groupBy("occupation").agg(
        avg("income").alias("avg_income"),
        count("*").alias("sample_size")
    ).filter(col("sample_size") >= 10).orderBy(desc("avg_income"))
    # Approval performance for tenured borrowers (5+ years experience),
    # again guarded by a minimum group size to avoid noisy rates.
    stable_occupations = occupation_df.filter(col("work_experience") >= 5).groupBy("occupation").agg(
        count("*").alias("stable_users"),
        avg("income").alias("stable_avg_income"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("stable_approved"),
        (sum(when(col("loan_status") == "approved", 1).otherwise(0)) * 100.0 / count("*")).alias("stable_approval_rate")
    ).filter(col("stable_users") >= 5).orderBy(desc("stable_approval_rate"))
    return {
        'occupation_statistics': occupation_stats.collect(),
        'experience_analysis': experience_analysis.collect(),
        'high_risk_occupations': high_risk_occupations.collect(),
        'income_ranking': occupation_income_relation.collect(),
        'stable_occupation_performance': stable_occupations.collect()
    }
def user_profile_analysis(request):
    """Build user-profile segments and related portfolio statistics.

    Queries ``user_loan_data`` through the shared ``spark`` session,
    derives an age/income based ``user_segment`` label, then computes
    per-segment approval/risk rates, education and marital-status
    breakdowns, loan-purpose preferences per segment, and a view of
    high-value customers (income > 100k and credit score > 700).

    Args:
        request: Django HTTP request object; unused, kept for the view
            signature.

    Returns:
        dict: keys ``user_segments``, ``education_insights``,
        ``purpose_preferences``, ``marital_patterns`` and
        ``premium_customers`` — each a list of collected Rows.
    """
    profile_df = spark.sql("SELECT user_id, age, income, occupation, education, marital_status, loan_amount, loan_purpose, credit_score, loan_status FROM user_loan_data")
    # Derive a coarse demographic segment from age and income thresholds.
    demographic_segments = profile_df.withColumn(
        "user_segment",
        when((col("age") < 30) & (col("income") < 60000), "年轻低收入群体")
        .when((col("age") < 30) & (col("income") >= 60000), "年轻高收入群体")
        .when((col("age") >= 30) & (col("age") < 45) & (col("income") < 80000), "中年中等收入群体")
        .when((col("age") >= 30) & (col("age") < 45) & (col("income") >= 80000), "中年高收入群体")
        .when((col("age") >= 45) & (col("income") < 100000), "成熟稳定群体")
        .otherwise("高净值群体"))
    # Per-segment sizes and rates; the chain is parenthesized so the
    # .withColumn continuations parse as one expression.
    segment_analysis = (
        demographic_segments.groupBy("user_segment").agg(
            count("*").alias("segment_size"),
            avg("loan_amount").alias("avg_loan_preference"),
            avg("credit_score").alias("avg_credit_score"),
            sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("approved_users"),
            sum(when(col("loan_status") == "default", 1).otherwise(0)).alias("default_users")
        )
        .withColumn("approval_rate", col("approved_users") * 100.0 / col("segment_size"))
        # NOTE(review): segments with zero approvals get NULL here
        # (Spark SQL division-by-zero semantics), not an error.
        .withColumn("risk_rate", col("default_users") * 100.0 / col("approved_users"))
        .orderBy(desc("segment_size"))
    )
    education_impact = profile_df.groupBy("education").agg(
        count("*").alias("education_count"),
        avg("income").alias("avg_income_by_education"),
        avg("credit_score").alias("avg_credit_by_education"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("education_approved"),
        (sum(when(col("loan_status") == "approved", 1).otherwise(0)) * 100.0 / count("*")).alias("education_approval_rate")
    ).orderBy(desc("avg_income_by_education"))
    # BUG FIX: must group on demographic_segments — profile_df has no
    # "user_segment" column, so the original raised an AnalysisException.
    loan_purpose_preference = demographic_segments.groupBy("loan_purpose", "user_segment").agg(
        count("*").alias("purpose_count"),
        avg("loan_amount").alias("avg_amount_by_purpose")
    ).orderBy("user_segment", desc("purpose_count"))
    marital_financial_pattern = profile_df.groupBy("marital_status").agg(
        count("*").alias("marital_count"),
        avg("income").alias("avg_income_by_marital"),
        avg("loan_amount").alias("avg_loan_by_marital"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("marital_approved")
    ).withColumn("marital_approval_rate", col("marital_approved") * 100.0 / col("marital_count"))
    # High-value customers: high income AND strong credit score, grouped
    # by occupation/education, with a minimum group size filter.
    high_value_customers = profile_df.filter((col("income") > 100000) & (col("credit_score") > 700)).groupBy("occupation", "education").agg(
        count("*").alias("high_value_count"),
        avg("loan_amount").alias("premium_loan_preference"),
        sum(when(col("loan_status") == "approved", 1).otherwise(0)).alias("premium_approved")
    ).filter(col("high_value_count") >= 3).orderBy(desc("high_value_count"))
    # Closing brace restored — it was lost in the original listing.
    return {
        'user_segments': segment_analysis.collect(),
        'education_insights': education_impact.collect(),
        'purpose_preferences': loan_purpose_preference.collect(),
        'marital_patterns': marital_financial_pattern.collect(),
        'premium_customers': high_value_customers.collect()
    }
基于大数据的用户贷款行为数据分析系统文档展示
💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐