Preface
💖💖Author: 计算机程序员小杨 💙💙About me: I work in the computer field and am comfortable in several IT directions, including Java, WeChat Mini Programs, Python, Golang, and Android. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I also know some techniques for lowering similarity-check scores. I love technology, enjoy digging into new tools and frameworks, and like solving real problems with code; feel free to ask me anything about code or technology! 💛💛A word of thanks: thank you all for your follows and support! 💕💕Contact 计算机程序员小杨 at the end of this post to get the source code 💜💜 Web practical projects · Android/Mini Program practical projects · Big data practical projects · Deep learning practical projects · Graduation project topic selection 💜💜
I. Development Tools
Big data framework: Hadoop + Spark (Hive is not used in this build; customization is supported)
Development language: Python + Java (both versions supported)
Backend framework: Django + Spring Boot (Spring + SpringMVC + MyBatis) (both versions supported)
Frontend: Vue + ElementUI + ECharts + HTML + CSS + JavaScript + jQuery
Key technologies: Hadoop, HDFS, Spark, Spark SQL, Pandas, NumPy
Database: MySQL
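As a quick sanity check that this stack is wired together, a few lines of PySpark can read a raw file from HDFS and hand a small sample to Pandas. This is only a sketch: the HDFS address, path, and filename below are placeholders, not taken from the project.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("StackSmokeTest").getOrCreate()
# Hypothetical HDFS location; substitute your namenode address and dataset.
raw = spark.read.option("header", True).csv("hdfs://localhost:9000/depression/raw.csv")
raw.printSchema()
sample = raw.limit(100).toPandas()  # small slice into Pandas for inspection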
II. System Overview
The Adolescent Depression Risk Data Analysis and Visualization System is a mental-health analysis platform built on big data technology, using the Hadoop + Spark distributed computing framework to process large volumes of adolescent mental-health data. The backend is built on Django; the frontend uses the Vue + ElementUI + ECharts stack for the interactive UI and data visualization. The platform integrates user management, data entry, and multi-dimensional analysis modules, mining adolescent depression risk across 13 dimensions, including depression-severity distribution, age group, gender, family history, sleep duration, physical exercise, social media use, social support, socioeconomic status, current occupation/status, music preference, and smoking habits. Spark SQL performs distributed queries and aggregations, Pandas and NumPy handle the statistical analysis, and the results are rendered as charts on a visualization dashboard. Raw data is stored in HDFS and business data in MySQL, forming a complete pipeline from data collection and storage through computation to visual presentation, and giving school mental-health staff and researchers a data-driven support tool.
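All 13 dimensions reduce to the same group-and-count pattern, so a single generic helper could back them. The sketch below is illustrative only (the helper name and signature are not from the project code):

from pyspark.sql.functions import col, round as spark_round

def dimension_distribution(df, dim_col):
    # Count records per value of one analysis dimension, with percentages.
    total = df.count()
    return (df.groupBy(dim_col).count()
              .withColumn("percentage", spark_round(col("count") / total * 100, 2))
              .orderBy(col("count").desc()))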
III. Feature Demo
IV. UI Screenshots
V. Source Code
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, avg, when, round as spark_round
from django.http import JsonResponse
from django.views import View
import json

# One SparkSession shared by all analysis views; the warehouse dir and
# memory settings match a single-node development setup.
spark = (SparkSession.builder
         .appName("DepressionAnalysis")
         .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
         .config("spark.driver.memory", "2g")
         .config("spark.executor.memory", "2g")
         .getOrCreate())
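# --- Sketch (not in the original listing): each view below repeats the same
# --- JDBC options, so a shared loader would remove the duplication. In
# --- production the credentials should come from django.conf.settings
# --- rather than being hardcoded.
def load_depression_df(table="depression_data"):
    return (spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/depression_db")
            .option("driver", "com.mysql.cj.jdbc.Driver")
            .option("dbtable", table)
            .option("user", "root")
            .option("password", "password")
            .load())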
class DepressionDegreeAnalysisView(View):
    """Distribution of depression severity across all records."""

    def get(self, request):
        try:
            # Load the source table from MySQL over JDBC.
            df = (spark.read.format("jdbc")
                  .option("url", "jdbc:mysql://localhost:3306/depression_db")
                  .option("driver", "com.mysql.cj.jdbc.Driver")
                  .option("dbtable", "depression_data")
                  .option("user", "root")
                  .option("password", "password")
                  .load())
            df.createOrReplaceTempView("depression_table")
            # Count records per severity level.
            degree_distribution = spark.sql(
                "SELECT depression_degree, COUNT(*) as count FROM depression_table "
                "GROUP BY depression_degree ORDER BY depression_degree")
            total_count = df.count()
            degree_stats = degree_distribution.withColumn(
                "percentage", spark_round((col("count") / total_count) * 100, 2))
            result_data = degree_stats.collect()
            distribution_list = [{"degree": row["depression_degree"],
                                  "count": int(row["count"]),
                                  "percentage": float(row["percentage"])}
                                 for row in result_data]
            # Moderate and severe cases count as high risk.
            high_risk_count = df.filter((col("depression_degree") == "中度抑郁") |
                                        (col("depression_degree") == "重度抑郁")).count()
            high_risk_rate = round(float(high_risk_count / total_count * 100), 2) if total_count > 0 else 0
            # Map the four severity labels to a 0-3 score and average them.
            avg_score_df = spark.sql(
                "SELECT AVG(CASE WHEN depression_degree='无抑郁' THEN 0 "
                "WHEN depression_degree='轻度抑郁' THEN 1 "
                "WHEN depression_degree='中度抑郁' THEN 2 "
                "WHEN depression_degree='重度抑郁' THEN 3 END) as avg_severity "
                "FROM depression_table")
            avg_severity = round(float(avg_score_df.collect()[0]["avg_severity"]), 2)
            return JsonResponse({"status": "success", "data": {
                "distribution": distribution_list,
                "total_count": int(total_count),
                "high_risk_rate": high_risk_rate,
                "avg_severity": avg_severity}})
        except Exception as e:
            return JsonResponse({"status": "error", "message": str(e)})
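# --- Optional follow-up (not in the original listing): the overview cites
# --- Pandas/NumPy for statistics, so here is a sketch of bridging the Spark
# --- result into NumPy, e.g. a weighted standard deviation of the 0-3
# --- severity score to complement avg_severity above. It reuses
# --- degree_distribution from the view and assumes only the four known
# --- severity labels occur in the data.
import numpy as np

score_map = {"无抑郁": 0, "轻度抑郁": 1, "中度抑郁": 2, "重度抑郁": 3}
pdf = degree_distribution.toPandas()  # small result set: safe to collect
scores = pdf["depression_degree"].map(score_map).to_numpy()
weights = pdf["count"].to_numpy()
mean = np.average(scores, weights=weights)
severity_std = float(np.sqrt(np.average((scores - mean) ** 2, weights=weights)))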
class MultiFactorCorrelationAnalysisView(View):
    """Cross two analysis dimensions and compare average depression scores."""

    # Whitelist of columns the client may group by, so user input never
    # reaches the SQL text directly (the original f-strings were injectable);
    # extend with the remaining dimension columns as needed.
    ALLOWED_FACTORS = {"age", "gender", "sleep_hours", "exercise_frequency",
                       "social_media_hours", "family_history", "social_support"}

    def post(self, request):
        try:
            params = json.loads(request.body)
            primary_factor = params.get("primary_factor", "sleep_hours")
            secondary_factor = params.get("secondary_factor", "exercise_frequency")
            if primary_factor not in self.ALLOWED_FACTORS or secondary_factor not in self.ALLOWED_FACTORS:
                return JsonResponse({"status": "error", "message": "unsupported factor"})
            df = (spark.read.format("jdbc")
                  .option("url", "jdbc:mysql://localhost:3306/depression_db")
                  .option("driver", "com.mysql.cj.jdbc.Driver")
                  .option("dbtable", "depression_data")
                  .option("user", "root")
                  .option("password", "password")
                  .load())
            # Map severity labels to a 0-3 score once; unknown labels score 0.
            df_with_score = df.withColumn("depression_score",
                when(col("depression_degree") == "无抑郁", 0)
                .when(col("depression_degree") == "轻度抑郁", 1)
                .when(col("depression_degree") == "中度抑郁", 2)
                .when(col("depression_degree") == "重度抑郁", 3)
                .otherwise(0))
            df_with_score.createOrReplaceTempView("depression_multi_factor")
            # Average score per (primary, secondary) value pair.
            correlation_result = spark.sql(
                f"SELECT {primary_factor}, {secondary_factor}, "
                f"AVG(depression_score) as avg_score, COUNT(*) as sample_count "
                f"FROM depression_multi_factor "
                f"GROUP BY {primary_factor}, {secondary_factor} "
                f"ORDER BY {primary_factor}, {secondary_factor}")
            matrix_data = [{"primary_value": str(row[primary_factor]),
                            "secondary_value": str(row[secondary_factor]),
                            "avg_score": round(float(row["avg_score"]), 2),
                            "sample_count": int(row["sample_count"])}
                           for row in correlation_result.collect()]
            # Marginal impact of each factor on its own, strongest first.
            primary_factor_impact = spark.sql(
                f"SELECT {primary_factor}, AVG(depression_score) as avg_score, COUNT(*) as count "
                f"FROM depression_multi_factor GROUP BY {primary_factor} ORDER BY avg_score DESC")
            primary_impact_list = [{"value": str(row[primary_factor]),
                                    "avg_score": round(float(row["avg_score"]), 2),
                                    "count": int(row["count"])}
                                   for row in primary_factor_impact.collect()]
            secondary_factor_impact = spark.sql(
                f"SELECT {secondary_factor}, AVG(depression_score) as avg_score, COUNT(*) as count "
                f"FROM depression_multi_factor GROUP BY {secondary_factor} ORDER BY avg_score DESC")
            secondary_impact_list = [{"value": str(row[secondary_factor]),
                                      "avg_score": round(float(row["avg_score"]), 2),
                                      "count": int(row["count"])}
                                     for row in secondary_factor_impact.collect()]
            return JsonResponse({"status": "success", "data": {
                "matrix": matrix_data,
                "primary_factor_impact": primary_impact_list,
                "secondary_factor_impact": secondary_impact_list,
                "factors": {"primary": primary_factor, "secondary": secondary_factor}}})
        except Exception as e:
            return JsonResponse({"status": "error", "message": str(e)})
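# --- Optional follow-up (not in the original listing): the "matrix" result
# --- is a flat list of triples; for a heatmap it is handy to pivot it into
# --- a 2-D grid first. Sketch reusing matrix_data from the view above.
import pandas as pd

matrix_df = pd.DataFrame(matrix_data)
heatmap_grid = matrix_df.pivot(index="primary_value",
                               columns="secondary_value",
                               values="avg_score")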
class RiskPredictionModelView(View):
    """Estimate an individual's risk from the average score of similar records."""

    def post(self, request):
        try:
            params = json.loads(request.body)
            # Numeric fields are cast defensively: JSON clients may send strings.
            age = int(params.get("age", 0))
            gender = params.get("gender")
            sleep_hours = float(params.get("sleep_hours", 0))
            exercise_frequency = params.get("exercise_frequency")
            social_media_hours = float(params.get("social_media_hours", 0))
            family_history = params.get("family_history")
            social_support = params.get("social_support")  # read but not used in the current scoring
            df = (spark.read.format("jdbc")
                  .option("url", "jdbc:mysql://localhost:3306/depression_db")
                  .option("driver", "com.mysql.cj.jdbc.Driver")
                  .option("dbtable", "depression_data")
                  .option("user", "root")
                  .option("password", "password")
                  .load())
            df_with_score = df.withColumn("depression_score",
                when(col("depression_degree") == "无抑郁", 0)
                .when(col("depression_degree") == "轻度抑郁", 1)
                .when(col("depression_degree") == "中度抑郁", 2)
                .when(col("depression_degree") == "重度抑郁", 3)
                .otherwise(0))
            df_with_score.createOrReplaceTempView("risk_prediction_table")
            # Nearest-cohort lookup with progressively looser matching:
            # four fields, then age + gender, then the whole population.
            similar_samples = df_with_score.filter(
                (col("age") == age) & (col("gender") == gender) &
                (col("sleep_hours") == sleep_hours) &
                (col("exercise_frequency") == exercise_frequency))
            similar_count = similar_samples.count()  # cache: each count() triggers a Spark job
            if similar_count == 0:
                similar_samples = df_with_score.filter((col("age") == age) & (col("gender") == gender))
                similar_count = similar_samples.count()
            if similar_count == 0:
                similar_samples = df_with_score
                similar_count = similar_samples.count()
            avg_risk_score = similar_samples.agg(avg("depression_score")).collect()[0][0]
            risk_score = round(float(avg_risk_score), 2) if avg_risk_score is not None else 0
            # Flag a risk factor when the user matches a sub-group whose
            # average score exceeds an empirical threshold.
            risk_factors = []
            sleep_avg = spark.sql("SELECT AVG(depression_score) as avg_score FROM risk_prediction_table WHERE sleep_hours < 6").collect()[0]["avg_score"]
            if sleep_hours < 6 and sleep_avg and float(sleep_avg) > 1.5:
                risk_factors.append({"factor": "睡眠不足", "impact": "高", "description": f"您的睡眠时长为{sleep_hours}小时,低于6小时的群体平均抑郁评分为{round(float(sleep_avg), 2)}"})
            exercise_avg = spark.sql("SELECT AVG(depression_score) as avg_score FROM risk_prediction_table WHERE exercise_frequency='从不'").collect()[0]["avg_score"]
            if exercise_frequency == "从不" and exercise_avg and float(exercise_avg) > 1.0:
                risk_factors.append({"factor": "缺乏锻炼", "impact": "中", "description": f"从不锻炼的群体平均抑郁评分为{round(float(exercise_avg), 2)}"})
            social_media_avg = spark.sql("SELECT AVG(depression_score) as avg_score FROM risk_prediction_table WHERE social_media_hours > 5").collect()[0]["avg_score"]
            if social_media_hours > 5 and social_media_avg and float(social_media_avg) > 1.3:
                risk_factors.append({"factor": "社交媒体过度使用", "impact": "中", "description": f"每日使用超过5小时的群体平均抑郁评分为{round(float(social_media_avg), 2)}"})
            family_avg = spark.sql("SELECT AVG(depression_score) as avg_score FROM risk_prediction_table WHERE family_history='有'").collect()[0]["avg_score"]
            if family_history == "有" and family_avg and float(family_avg) > 1.5:
                risk_factors.append({"factor": "家族病史", "impact": "高", "description": f"有家族病史的群体平均抑郁评分为{round(float(family_avg), 2)}"})
            # Three risk bands over the 0-3 score scale.
            risk_level = "低风险" if risk_score < 1 else "中风险" if risk_score < 2 else "高风险"
            suggestions = []
            if risk_score >= 1.5:
                suggestions.append("建议咨询专业心理医生进行评估")
            if sleep_hours < 6:
                suggestions.append("建议保证每日7-8小时睡眠")
            if exercise_frequency in ["从不", "很少"]:
                suggestions.append("建议每周至少进行3次体育锻炼")
            if social_media_hours > 4:
                suggestions.append("建议减少社交媒体使用时间,增加线下社交")
            return JsonResponse({"status": "success", "data": {
                "risk_score": risk_score, "risk_level": risk_level,
                "risk_factors": risk_factors, "suggestions": suggestions,
                "similar_sample_count": similar_count}})
        except Exception as e:
            return JsonResponse({"status": "error", "message": str(e)})
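The listing above omits the URL routing. A minimal urls.py wiring up the three views might look like the sketch below; the paths are illustrative, and depending on the project's CSRF setup the POST views may additionally need csrf_exempt.

from django.urls import path
from .views import (DepressionDegreeAnalysisView,
                    MultiFactorCorrelationAnalysisView,
                    RiskPredictionModelView)

urlpatterns = [
    path("api/analysis/degree/", DepressionDegreeAnalysisView.as_view()),
    path("api/analysis/correlation/", MultiFactorCorrelationAnalysisView.as_view()),
    path("api/analysis/risk/", RiskPredictionModelView.as_view()),
]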
VI. Documentation
Closing
💕💕To get the source code, contact 计算机程序员小杨