前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
基于大数据的消费者信用评分画像数据分析与可视化系统是一个综合运用现代大数据技术栈的完整解决方案,该系统采用Hadoop分布式存储架构结合Spark大数据计算引擎,实现对海量消费者信用数据的高效处理与深度分析。系统后端基于Python语言开发,使用Django框架构建稳定的Web服务,前端采用Vue.js配合ElementUI组件库打造现代化用户界面,通过Echarts图表库实现丰富的数据可视化效果。系统核心功能涵盖信用数据管理、用户画像分析、消费行为分析、信用评分分析、生活偏好分析、用户分群画像以及可视化大屏展示等九大模块,通过Spark SQL进行复杂数据查询,结合Pandas和NumPy进行数据科学计算,将原始的消费者行为数据转化为直观的信用评分和用户画像,为金融机构和相关企业提供科学的信用评估依据,同时通过MySQL数据库确保数据的持久化存储和高效访问。
三.系统功能演示
大数据导师强烈推荐:基于Hadoop+Spark的消费者信用画像分析可视化系统技术含量十足
四.系统界面展示
五.系统源码展示
import json

import numpy as np
import pandas as pd
from django.http import JsonResponse
from django.views import View
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import StandardScaler, VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
    avg,
    col,
    count,
    countDistinct,
    percentile_approx,
    stddev,
    when,
)
from pyspark.sql.functions import sum as spark_sum
# Shared SparkSession used by every analysis view below. It is created once,
# at module import time, so all requests reuse the same Spark application.
# "spark.sql.adaptive.enabled" turns on Adaptive Query Execution so Spark can
# re-optimize shuffle partitioning at runtime for the groupBy-heavy queries here.
spark = SparkSession.builder.appName("ConsumerCreditAnalysis").config("spark.sql.adaptive.enabled", "true").getOrCreate()
def user_portrait_analysis(request):
    """Build a demographic credit-portrait summary from the consumer table.

    Loads the ``consumer_data`` table over JDBC and computes one aggregate
    DataFrame per demographic dimension (age group, gender, education,
    occupation, marital status, region, credit-history length, and a derived
    income bracket).  Each aggregate is collected to the driver and serialized
    as a list of records.

    Args:
        request: Django HttpRequest (unused beyond routing).

    Returns:
        JsonResponse with ``{"status": "success", "data": {...}}`` where each
        key maps to a list of row dicts for the front-end charts.
    """
    # NOTE(review): connection credentials are hard-coded; move them to Django
    # settings / environment variables before production use.
    df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/credit_db")
        .option("dbtable", "consumer_data")
        .option("user", "root")
        .option("password", "password")
        .load()
    )
    # Derived column: bucket monthly income into three coarse brackets so the
    # income analysis can group on a categorical value.
    income_brackets = df.withColumn(
        "income_bracket",
        when(col("monthly_income") < 5000, "Low")
        .when(col("monthly_income") < 15000, "Medium")
        .otherwise("High"),
    )
    # One aggregate DataFrame per portrait dimension.  Keeping them in a dict
    # lets a single loop handle the toPandas()/to_dict() marshalling below.
    analyses = {
        "age_stats": df.groupBy("age_group").agg(
            avg("credit_score").alias("avg_credit"),
            count("*").alias("user_count"),
            avg("monthly_income").alias("avg_income"),
        ),
        "gender_distribution": df.groupBy("gender").agg(
            count("*").alias("count"),
            avg("credit_score").alias("avg_score"),
        ),
        "education_analysis": df.groupBy("education_level").agg(
            avg("credit_score").alias("avg_credit"),
            avg("loan_amount").alias("avg_loan"),
            count("*").alias("total_users"),
        ),
        "occupation_stats": df.groupBy("occupation").agg(
            avg("credit_score").alias("avg_score"),
            avg("monthly_income").alias("avg_income"),
            stddev("credit_score").alias("score_stddev"),
        ),
        "marital_analysis": df.groupBy("marital_status").agg(
            count("*").alias("count"),
            avg("credit_score").alias("avg_score"),
            avg("debt_ratio").alias("avg_debt_ratio"),
        ),
        "regional_distribution": df.groupBy("region").agg(
            count("*").alias("user_count"),
            avg("credit_score").alias("regional_avg_score"),
        ),
        "credit_history_stats": df.groupBy("credit_history_length").agg(
            avg("credit_score").alias("avg_score"),
            count("*").alias("count"),
        ),
        "income_analysis": income_brackets.groupBy("income_bracket").agg(
            avg("credit_score").alias("avg_score"),
            count("*").alias("count"),
            avg("loan_default_rate").alias("avg_default_rate"),
        ),
    }
    # Collect each (small, already-aggregated) result to the driver and convert
    # to JSON-friendly row dicts.
    result_data = {
        name: frame.toPandas().to_dict("records") for name, frame in analyses.items()
    }
    return JsonResponse({"status": "success", "data": result_data})
def consumption_behavior_analysis(request):
    """Analyze per-user consumption behavior from the transaction table.

    Loads ``transaction_data`` over JDBC and computes per-user aggregates:
    monthly spend, category mix, overall spending patterns and volatility,
    hour-of-day distribution, weekday/weekend split, high-value transactions
    (amount > 1000), merchant diversity, transaction frequency, and seasonal
    totals.

    Args:
        request: Django HttpRequest (unused beyond routing).

    Returns:
        JsonResponse with ``{"status": "success", "data": {...}}`` where each
        key maps to a list of row dicts.
    """
    # NOTE(review): connection credentials are hard-coded; move them to Django
    # settings / environment variables before production use.
    df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/credit_db")
        .option("dbtable", "transaction_data")
        .option("user", "root")
        .option("password", "password")
        .load()
    )
    # Tag each row as weekend (day_of_week 1 or 7) vs. weekday — assumes the
    # source encodes Sunday=1 / Saturday=7; confirm against the ETL job.
    weekend_weekday = df.withColumn(
        "day_type",
        when(col("day_of_week").isin([1, 7]), "Weekend").otherwise("Weekday"),
    )
    analyses = {
        "monthly_spending": df.groupBy("user_id", "transaction_month").agg(
            spark_sum("transaction_amount").alias("monthly_total"),
            count("*").alias("transaction_count"),
            avg("transaction_amount").alias("avg_transaction"),
        ),
        "category_analysis": df.groupBy("user_id", "category").agg(
            spark_sum("transaction_amount").alias("category_total"),
            count("*").alias("category_count"),
        ),
        "spending_patterns": df.groupBy("user_id").agg(
            spark_sum("transaction_amount").alias("total_spending"),
            count("*").alias("total_transactions"),
            avg("transaction_amount").alias("avg_spending"),
            stddev("transaction_amount").alias("spending_volatility"),
        ),
        "time_analysis": df.groupBy("user_id", "transaction_hour").agg(
            count("*").alias("hourly_count"),
            avg("transaction_amount").alias("hourly_avg"),
        ),
        "day_type_analysis": weekend_weekday.groupBy("user_id", "day_type").agg(
            spark_sum("transaction_amount").alias("day_type_total"),
            count("*").alias("day_type_count"),
        ),
        "high_value_transactions": df.filter(col("transaction_amount") > 1000)
        .groupBy("user_id")
        .agg(
            count("*").alias("high_value_count"),
            spark_sum("transaction_amount").alias("high_value_total"),
        ),
        # BUGFIX: the original used count("merchant_id"), which counts non-null
        # rows (i.e. total transactions with a merchant), not distinct
        # merchants, despite the "unique_merchants" alias.  countDistinct
        # matches the intended semantics.
        "merchant_diversity": df.groupBy("user_id").agg(
            countDistinct("merchant_id").alias("unique_merchants"),
            count("*").alias("total_transactions"),
        ),
        "spending_frequency": df.groupBy("user_id").agg(
            count("*").alias("transaction_frequency"),
            spark_sum("transaction_amount").alias("total_amount"),
        ),
        "seasonal_analysis": df.groupBy("user_id", "season").agg(
            spark_sum("transaction_amount").alias("seasonal_total"),
            avg("transaction_amount").alias("seasonal_avg"),
        ),
    }
    behavior_data = {
        name: frame.toPandas().to_dict("records") for name, frame in analyses.items()
    }
    return JsonResponse({"status": "success", "data": behavior_data})
def user_clustering_analysis(request):
    """Segment users into 5 clusters with K-means and profile each cluster.

    Pipeline: load ``user_features`` over JDBC → assemble 8 numeric feature
    columns into a vector → standardize (zero mean, unit variance) → K-means
    (k=5, fixed seed for reproducibility) → per-cluster summaries, plus
    rule-based high-risk (credit < 600 or debt ratio > 0.8) and premium
    (credit > 750 and income > 10000) user lists.

    Args:
        request: Django HttpRequest (unused beyond routing).

    Returns:
        JsonResponse with ``{"status": "success", "data": {...}}`` where each
        key maps to a list of row dicts.
    """
    # NOTE(review): connection credentials are hard-coded; move them to Django
    # settings / environment variables before production use.
    df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/credit_db")
        .option("dbtable", "user_features")
        .option("user", "root")
        .option("password", "password")
        .load()
    )
    feature_cols = [
        "credit_score",
        "monthly_income",
        "debt_ratio",
        "loan_amount",
        "transaction_frequency",
        "avg_transaction_amount",
        "credit_history_months",
        "number_of_accounts",
    ]
    # Feature engineering: vectorize then standardize so no single feature
    # (e.g. loan_amount, which is on a much larger scale) dominates K-means'
    # Euclidean distance.
    assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
    df_assembled = assembler.transform(df)
    scaler = StandardScaler(
        inputCol="features", outputCol="scaled_features", withStd=True, withMean=True
    )
    df_scaled = scaler.fit(df_assembled).transform(df_assembled)
    # k=5 and a fixed seed keep cluster assignments reproducible across runs.
    kmeans = KMeans(k=5, seed=42, featuresCol="scaled_features", predictionCol="cluster")
    df_clustered = kmeans.fit(df_scaled).transform(df_scaled)
    analyses = {
        "cluster_summary": df_clustered.groupBy("cluster").agg(
            count("*").alias("cluster_size"),
            avg("credit_score").alias("avg_credit_score"),
            avg("monthly_income").alias("avg_income"),
            avg("debt_ratio").alias("avg_debt_ratio"),
            avg("loan_amount").alias("avg_loan"),
            avg("transaction_frequency").alias("avg_frequency"),
        ),
        "cluster_profiles": df_clustered.groupBy("cluster").agg(
            avg("credit_score").alias("credit_score"),
            avg("monthly_income").alias("income"),
            avg("debt_ratio").alias("debt_ratio"),
            avg("age").alias("avg_age"),
            count("*").alias("size"),
        ),
        "high_risk_users": df_clustered.filter(
            (col("credit_score") < 600) | (col("debt_ratio") > 0.8)
        ).select("user_id", "cluster", "credit_score", "debt_ratio", "monthly_income"),
        "premium_users": df_clustered.filter(
            (col("credit_score") > 750) & (col("monthly_income") > 10000)
        ).select("user_id", "cluster", "credit_score", "monthly_income", "loan_amount"),
        "cluster_characteristics": df_clustered.groupBy("cluster").agg(
            percentile_approx("credit_score", 0.5).alias("median_credit"),
            percentile_approx("monthly_income", 0.5).alias("median_income"),
            stddev("credit_score").alias("credit_std"),
        ),
        "age_cluster_distribution": df_clustered.groupBy("cluster", "age_group").agg(
            count("*").alias("age_count")
        ),
        "gender_cluster_distribution": df_clustered.groupBy("cluster", "gender").agg(
            count("*").alias("gender_count")
        ),
        "education_cluster_distribution": df_clustered.groupBy(
            "cluster", "education_level"
        ).agg(count("*").alias("education_count")),
        "cluster_risk_analysis": df_clustered.groupBy("cluster").agg(
            avg("loan_default_probability").alias("avg_default_prob"),
            count("*").alias("total_users"),
        ),
    }
    clustering_result = {
        name: frame.toPandas().to_dict("records") for name, frame in analyses.items()
    }
    return JsonResponse({"status": "success", "data": clustering_result})
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨