基于大数据的学生抑郁数据分析系统 | 担心数据分析系统太复杂?Hadoop+Spark+Django让开发变简单

44 阅读7分钟

💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐

基于大数据的学生抑郁数据分析系统介绍

学生抑郁数据分析系统是一套基于大数据技术栈构建的心理健康数据处理平台,采用Hadoop分布式存储架构结合Spark大数据计算引擎,实现对学生群体心理状态的深度数据挖掘与智能分析。系统前端采用Vue+ElementUI+Echarts技术栈构建现代化用户界面,后端基于Django框架提供RESTful API服务,数据层面通过MySQL进行结构化存储管理。核心功能模块涵盖学生基础画像分析、学业因素关联分析、生活方式影响评估以及成长背景综合分析,通过Spark SQL进行复杂数据查询与统计计算,利用Pandas和NumPy进行数据预处理与特征工程,最终通过可视化图表展现分析结果。系统支持大规模学生数据的批量导入与实时处理,能够从多维度构建学生心理健康状态评估模型,为教育管理者提供科学的数据支撑和决策参考,同时保障数据隐私安全与系统稳定运行。

基于大数据的学生抑郁数据分析系统演示视频

演示视频

基于大数据的学生抑郁数据分析系统演示图片

在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述

基于大数据的学生抑郁数据分析系统代码展示

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, when, count, avg, sum, max, min
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json

# Shared, module-level SparkSession used by every analysis view below.
# Adaptive Query Execution (AQE) is enabled so Spark can re-optimize the
# shuffle/aggregation plans of the JDBC-backed queries at runtime.
spark = SparkSession.builder.appName("StudentDepressionAnalysis").config("spark.sql.adaptive.enabled","true").getOrCreate()

@csrf_exempt
def student_basic_profile_analysis(request):
    """学生基础画像分析 (student basic profile) endpoint.

    POST body: ``{"student_ids": [<id>, ...]}``.
    Loads ``student_depression_data`` over JDBC, restricts it to the
    requested students, and returns JSON distributions by age group,
    gender, depression level, grade, major, hometown type, family income
    and personality type, plus the list of high-risk students
    (depression_score >= 70).
    """
    if request.method != 'POST':
        # The original fell through and returned None for non-POST requests,
        # which Django turns into a 500; answer with an explicit 405 instead.
        return JsonResponse({'error': 'POST required'}, status=405)
    try:
        data = json.loads(request.body)
    except (ValueError, TypeError):
        return JsonResponse({'error': 'invalid JSON body'}, status=400)
    student_ids = data.get('student_ids', [])

    def _num(value):
        # Spark avg() yields None for a group whose values are all NULL;
        # float(None) would raise, so propagate None into the JSON instead.
        return float(value) if value is not None else None

    # NOTE(review): JDBC credentials are hard-coded; move them to Django
    # settings / environment variables instead of source code.
    df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/depression_db").option("dbtable", "student_depression_data").option("user", "root").option("password", "password").load()
    filtered_df = df.filter(col("student_id").isin(student_ids))
    age_distribution = filtered_df.groupBy("age_group").agg(count("*").alias("count")).collect()
    gender_distribution = filtered_df.groupBy("gender").agg(count("*").alias("count")).collect()
    depression_level_stats = filtered_df.groupBy("depression_level").agg(count("*").alias("count"), avg("depression_score").alias("avg_score")).collect()
    grade_analysis = filtered_df.groupBy("grade").agg(count("*").alias("count"), avg("depression_score").alias("avg_score"), max("depression_score").alias("max_score"), min("depression_score").alias("min_score")).collect()
    major_analysis = filtered_df.groupBy("major").agg(count("*").alias("count"), avg("depression_score").alias("avg_score")).orderBy(col("avg_score").desc()).collect()
    hometown_analysis = filtered_df.groupBy("hometown_type").agg(count("*").alias("count"), avg("depression_score").alias("avg_score")).collect()
    family_income_impact = filtered_df.groupBy("family_income_level").agg(count("*").alias("count"), avg("depression_score").alias("avg_score")).orderBy("family_income_level").collect()
    personality_correlation = filtered_df.groupBy("personality_type").agg(count("*").alias("count"), avg("depression_score").alias("avg_score"), avg("social_anxiety_score").alias("avg_anxiety")).collect()
    # 70 is the project's high-risk threshold -- TODO confirm against the
    # scoring rubric used when the data was collected.
    risk_students = filtered_df.filter(col("depression_score") >= 70).select("student_id", "name", "depression_score", "depression_level").collect()
    result_data = {
        'age_distribution': [{'age_group': row.age_group, 'count': row.count} for row in age_distribution],
        'gender_distribution': [{'gender': row.gender, 'count': row.count} for row in gender_distribution],
        'depression_level_stats': [{'level': row.depression_level, 'count': row.count, 'avg_score': _num(row.avg_score)} for row in depression_level_stats],
        'grade_analysis': [{'grade': row.grade, 'count': row.count, 'avg_score': _num(row.avg_score), 'max_score': row.max_score, 'min_score': row.min_score} for row in grade_analysis],
        'major_analysis': [{'major': row.major, 'count': row.count, 'avg_score': _num(row.avg_score)} for row in major_analysis],
        'hometown_analysis': [{'hometown_type': row.hometown_type, 'count': row.count, 'avg_score': _num(row.avg_score)} for row in hometown_analysis],
        'family_income_impact': [{'income_level': row.family_income_level, 'count': row.count, 'avg_score': _num(row.avg_score)} for row in family_income_impact],
        'personality_correlation': [{'personality': row.personality_type, 'count': row.count, 'avg_score': _num(row.avg_score), 'avg_anxiety': _num(row.avg_anxiety)} for row in personality_correlation],
        'risk_students': [{'student_id': row.student_id, 'name': row.name, 'depression_score': row.depression_score, 'level': row.depression_level} for row in risk_students]
    }
    return JsonResponse(result_data)

@csrf_exempt
def academic_factor_analysis(request):
    """学业因素关联分析 (academic factor) endpoint.

    POST body: ``{"semester": "<semester>"}`` (defaults to ``"current"``).
    Filters ``student_depression_data`` to the semester and returns the
    GPA/depression correlation coefficient plus grouped statistics on GPA
    ranges, subject difficulty, exam anxiety, study methods, academic
    goals, course satisfaction, teacher relationships and peer competition.
    """
    if request.method != 'POST':
        # Original returned None (-> Django 500) for non-POST; be explicit.
        return JsonResponse({'error': 'POST required'}, status=405)
    try:
        data = json.loads(request.body)
    except (ValueError, TypeError):
        return JsonResponse({'error': 'invalid JSON body'}, status=400)
    semester = data.get('semester', 'current')

    def _num(value):
        # Spark avg() is None when every grouped value is NULL; keep the
        # None instead of crashing on float(None).
        return float(value) if value is not None else None

    # NOTE(review): hard-coded JDBC credentials -- move to settings/env.
    df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/depression_db").option("dbtable", "student_depression_data").option("user", "root").option("password", "password").load()
    filtered_df = df.filter(col("semester") == semester)
    # np.corrcoef on an empty frame or on NULL-bearing columns produces
    # NaN (invalid JSON) or raises; drop missing rows and require at least
    # two samples before computing the coefficient.
    gpa_pdf = filtered_df.select("gpa", "depression_score").toPandas().dropna()
    if len(gpa_pdf) >= 2:
        correlation_coeff = float(np.corrcoef(gpa_pdf['gpa'], gpa_pdf['depression_score'])[0, 1])
    else:
        correlation_coeff = None
    gpa_range_analysis = filtered_df.withColumn("gpa_range", when(col("gpa") >= 3.5, "优秀").when(col("gpa") >= 3.0, "良好").when(col("gpa") >= 2.5, "中等").otherwise("待提高")).groupBy("gpa_range").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("academic_stress_score").alias("avg_stress")).collect()
    subject_difficulty_impact = filtered_df.groupBy("difficult_subject_count").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("study_time_daily").alias("avg_study_time")).collect()
    exam_anxiety_analysis = filtered_df.filter(col("exam_anxiety_score").isNotNull()).groupBy("exam_period").agg(count("*").alias("count"), avg("exam_anxiety_score").alias("avg_anxiety"), avg("depression_score").alias("avg_depression")).collect()
    study_method_effectiveness = filtered_df.groupBy("preferred_study_method").agg(count("*").alias("count"), avg("study_efficiency_score").alias("avg_efficiency"), avg("depression_score").alias("avg_depression")).collect()
    academic_goal_pressure = filtered_df.groupBy("academic_goal_type").agg(count("*").alias("count"), avg("goal_pressure_score").alias("avg_pressure"), avg("depression_score").alias("avg_depression")).collect()
    course_satisfaction_impact = filtered_df.groupBy("course_satisfaction_level").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("learning_motivation_score").alias("avg_motivation")).collect()
    teacher_relationship_factor = filtered_df.groupBy("teacher_relationship_score").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("academic_confidence_score").alias("avg_confidence")).collect()
    peer_competition_stress = filtered_df.filter(col("peer_competition_score") > 0).groupBy("competition_intensity_level").agg(count("*").alias("count"), avg("peer_competition_score").alias("avg_competition"), avg("depression_score").alias("avg_depression")).collect()
    result_data = {
        'gpa_correlation_coefficient': correlation_coeff,
        'gpa_range_analysis': [{'range': row.gpa_range, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_stress': _num(row.avg_stress)} for row in gpa_range_analysis],
        'subject_difficulty_impact': [{'difficult_count': row.difficult_subject_count, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_study_time': _num(row.avg_study_time)} for row in subject_difficulty_impact],
        'exam_anxiety_analysis': [{'period': row.exam_period, 'count': row.count, 'avg_anxiety': _num(row.avg_anxiety), 'avg_depression': _num(row.avg_depression)} for row in exam_anxiety_analysis],
        'study_method_effectiveness': [{'method': row.preferred_study_method, 'count': row.count, 'avg_efficiency': _num(row.avg_efficiency), 'avg_depression': _num(row.avg_depression)} for row in study_method_effectiveness],
        'academic_goal_pressure': [{'goal_type': row.academic_goal_type, 'count': row.count, 'avg_pressure': _num(row.avg_pressure), 'avg_depression': _num(row.avg_depression)} for row in academic_goal_pressure],
        'course_satisfaction_impact': [{'satisfaction': row.course_satisfaction_level, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_motivation': _num(row.avg_motivation)} for row in course_satisfaction_impact],
        'teacher_relationship_factor': [{'relationship_score': row.teacher_relationship_score, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_confidence': _num(row.avg_confidence)} for row in teacher_relationship_factor],
        'peer_competition_stress': [{'intensity': row.competition_intensity_level, 'count': row.count, 'avg_competition': _num(row.avg_competition), 'avg_depression': _num(row.avg_depression)} for row in peer_competition_stress]
    }
    return JsonResponse(result_data)

@csrf_exempt
def lifestyle_factor_analysis(request):
    """生活方式影响评估 (lifestyle factor) endpoint.

    POST body: ``{"period": "<period>"}`` (defaults to ``"monthly"``).
    Returns grouped depression statistics by sleep, exercise, social
    activity, diet, screen time, hobbies, substance use, living
    arrangement and time management, plus a 4-way KMeans clustering of
    lifestyle features.
    """
    if request.method != 'POST':
        # Original returned None (-> Django 500) for non-POST; be explicit.
        return JsonResponse({'error': 'POST required'}, status=405)
    try:
        data = json.loads(request.body)
    except (ValueError, TypeError):
        return JsonResponse({'error': 'invalid JSON body'}, status=400)
    # NOTE(review): 'period' is parsed but no filter below uses it -- the
    # analysis always runs over the whole table; confirm intended behavior.
    analysis_period = data.get('period', 'monthly')

    def _num(value):
        # Spark avg() is None when every grouped value is NULL; keep the
        # None instead of crashing on float(None).
        return float(value) if value is not None else None

    # NOTE(review): hard-coded JDBC credentials -- move to settings/env.
    df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/depression_db").option("dbtable", "student_depression_data").option("user", "root").option("password", "password").load()
    sleep_pattern_analysis = df.withColumn("sleep_category", when(col("average_sleep_hours") >= 8, "充足").when(col("average_sleep_hours") >= 6, "正常").otherwise("不足")).groupBy("sleep_category").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("sleep_quality_score").alias("avg_quality")).collect()
    exercise_frequency_impact = df.groupBy("exercise_frequency_weekly").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("physical_health_score").alias("avg_physical"), avg("mental_energy_score").alias("avg_energy")).collect()
    social_activity_correlation = df.groupBy("social_activity_frequency").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("loneliness_score").alias("avg_loneliness"), avg("social_satisfaction_score").alias("avg_satisfaction")).collect()
    diet_habit_influence = df.groupBy("diet_regularity_level").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("nutrition_score").alias("avg_nutrition"), avg("energy_level_score").alias("avg_energy")).collect()
    screen_time_effect = df.withColumn("screen_time_category", when(col("daily_screen_hours") >= 8, "过度").when(col("daily_screen_hours") >= 4, "适中").otherwise("较少")).groupBy("screen_time_category").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("eye_strain_score").alias("avg_strain")).collect()
    hobby_engagement_benefit = df.filter(col("hobby_time_weekly") > 0).groupBy("hobby_type").agg(count("*").alias("count"), avg("hobby_time_weekly").alias("avg_time"), avg("depression_score").alias("avg_depression"), avg("life_satisfaction_score").alias("avg_satisfaction")).collect()
    substance_use_risk = df.filter(col("substance_use_frequency").isNotNull()).groupBy("substance_type").agg(count("*").alias("count"), avg("substance_use_frequency").alias("avg_frequency"), avg("depression_score").alias("avg_depression"), avg("addiction_risk_score").alias("avg_risk")).collect()
    living_environment_factor = df.groupBy("living_arrangement").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("privacy_satisfaction_score").alias("avg_privacy"), avg("roommate_relationship_score").alias("avg_relationship")).collect()
    time_management_skills = df.groupBy("time_management_level").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression"), avg("stress_level_score").alias("avg_stress"), avg("productivity_score").alias("avg_productivity")).collect()
    feature_cols = ['average_sleep_hours', 'exercise_frequency_weekly', 'social_activity_frequency', 'daily_screen_hours', 'hobby_time_weekly']
    # VectorAssembler raises on NULL feature values, and KMeans.fit fails
    # with fewer rows than clusters -- drop incomplete rows and guard so a
    # sparse table cannot crash the whole request.
    clean_df = df.select(feature_cols + ['depression_score']).na.drop(subset=feature_cols)
    if clean_df.count() >= 4:
        assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
        feature_df = assembler.transform(clean_df)
        kmeans = KMeans(k=4, seed=42, featuresCol="features")
        model = kmeans.fit(feature_df)
        clustered_df = model.transform(feature_df)
        cluster_analysis = clustered_df.groupBy("prediction").agg(count("*").alias("count"), avg("depression_score").alias("avg_depression")).collect()
    else:
        cluster_analysis = []
    result_data = {
        'sleep_pattern_analysis': [{'category': row.sleep_category, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_quality': _num(row.avg_quality)} for row in sleep_pattern_analysis],
        'exercise_frequency_impact': [{'frequency': row.exercise_frequency_weekly, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_physical': _num(row.avg_physical), 'avg_energy': _num(row.avg_energy)} for row in exercise_frequency_impact],
        'social_activity_correlation': [{'frequency': row.social_activity_frequency, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_loneliness': _num(row.avg_loneliness), 'avg_satisfaction': _num(row.avg_satisfaction)} for row in social_activity_correlation],
        'diet_habit_influence': [{'regularity': row.diet_regularity_level, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_nutrition': _num(row.avg_nutrition), 'avg_energy': _num(row.avg_energy)} for row in diet_habit_influence],
        'screen_time_effect': [{'category': row.screen_time_category, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_strain': _num(row.avg_strain)} for row in screen_time_effect],
        'hobby_engagement_benefit': [{'hobby': row.hobby_type, 'count': row.count, 'avg_time': _num(row.avg_time), 'avg_depression': _num(row.avg_depression), 'avg_satisfaction': _num(row.avg_satisfaction)} for row in hobby_engagement_benefit],
        'substance_use_risk': [{'substance': row.substance_type, 'count': row.count, 'avg_frequency': _num(row.avg_frequency), 'avg_depression': _num(row.avg_depression), 'avg_risk': _num(row.avg_risk)} for row in substance_use_risk],
        'living_environment_factor': [{'arrangement': row.living_arrangement, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_privacy': _num(row.avg_privacy), 'avg_relationship': _num(row.avg_relationship)} for row in living_environment_factor],
        'time_management_skills': [{'level': row.time_management_level, 'count': row.count, 'avg_depression': _num(row.avg_depression), 'avg_stress': _num(row.avg_stress), 'avg_productivity': _num(row.avg_productivity)} for row in time_management_skills],
        'lifestyle_clusters': [{'cluster_id': row.prediction, 'count': row.count, 'avg_depression': _num(row.avg_depression)} for row in cluster_analysis]
    }
    return JsonResponse(result_data)

基于大数据的学生抑郁数据分析系统文档展示

在这里插入图片描述

💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐