一、个人简介
💖💖作者:计算机编程果茶熊 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 计算机毕业设计选题 💕💕文末获取源码联系计算机编程果茶熊
二、系统介绍
大数据框架:Hadoop+Spark(Hive需要定制修改) 开发语言:Java+Python(两个版本都支持) 数据库:MySQL 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持) 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery
学生习惯数据可视化分析系统是一个基于大数据技术的学生行为分析平台,采用Hadoop+Spark分布式计算框架进行海量学生数据的处理与分析。系统后端采用Django框架构建,前端使用Vue+ElementUI+Echarts技术栈实现交互式数据可视化界面。通过集成Spark SQL、Pandas、NumPy等数据处理工具,系统能够从多维度对学生的学习行为、生活习惯、身心状态等进行深度挖掘和智能分析。平台主要包含背景因素分析、学业习惯分析、身心健康分析、外部环境分析、综合画像分析和可视化大屏六大核心功能模块,为教育管理者提供学生行为模式识别、习惯养成追踪、健康状态监测等数据支撑。系统通过HDFS分布式存储学生行为数据,利用Spark引擎进行实时计算和批处理分析,最终通过Echarts图表库将分析结果以直观的可视化形式展现,帮助学校更好地了解学生群体特征,制定个性化的教育策略和管理方案。
三、视频解说
四、部分功能展示
五、部分代码展示
import json

from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
    asc,
    avg,
    col,
    count,
    desc,
    lag,
    month,
    row_number,
    when,
)
from pyspark.sql.functions import sum as spark_sum
from pyspark.sql.types import (
    FloatType,
    IntegerType,
    StringType,
    StructField,
    StructType,
)
from pyspark.sql.window import Window
def analyze_study_habits(request):
    """Classify each student's study habits and return them as JSON.

    Reads the ``study_records`` table over JDBC, aggregates per-student
    daily study time, concentration, weekly consistency and learning
    efficiency, labels every student with a habit type, and returns all
    rows as ``{"status": "success", "data": [...]}``.

    Args:
        request: Django HTTP request (unused beyond routing).

    Returns:
        JsonResponse with one record per student.
    """
    spark = (
        SparkSession.builder
        .appName("StudyHabitsAnalysis")
        .config("spark.sql.adaptive.enabled", "true")
        .getOrCreate()
    )
    try:
        # NOTE(review): JDBC credentials are hard-coded — move them to
        # Django settings / environment variables before deployment.
        study_data = (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/student_db")
            .option("dbtable", "study_records")
            .option("user", "root")
            .option("password", "password")
            .load()
        )
        # Total study duration per student per day, then the per-student mean.
        daily_study_time = study_data.groupBy("student_id", "study_date").agg(
            spark_sum("study_duration").alias("daily_duration")
        )
        avg_study_time = daily_study_time.groupBy("student_id").agg(
            avg("daily_duration").alias("avg_daily_study")
        )
        # Rank each student's subjects by accumulated time. Lazily defined
        # and never referenced below, so Spark never executes it —
        # TODO(review): include in the response or delete.
        subject_preference = (
            study_data.groupBy("student_id", "subject")
            .agg(spark_sum("study_duration").alias("subject_time"))
            .withColumn(
                "rank",
                row_number().over(
                    Window.partitionBy("student_id").orderBy(desc("subject_time"))
                ),
            )
        )
        # Weight study frequency by time of day (morning 1.2 > afternoon 1.0
        # > other 0.8). Also unused in the response at present.
        study_pattern = (
            study_data.groupBy("student_id", "time_period")
            .agg(count("*").alias("frequency"))
            .withColumn(
                "pattern_score",
                when(col("time_period") == "morning", col("frequency") * 1.2)
                .when(col("time_period") == "afternoon", col("frequency") * 1.0)
                .otherwise(col("frequency") * 0.8),
            )
        )
        concentration_analysis = study_data.groupBy("student_id").agg(
            avg("concentration_score").alias("avg_concentration"),
            avg("completion_rate").alias("avg_completion"),
        )
        # Distinct weekdays with study activity divided by 7 → 0..1 regularity.
        habit_consistency = (
            study_data.groupBy("student_id", "week_day")
            .agg(count("*").alias("study_frequency"))
            .groupBy("student_id")
            .agg((count("*") / 7.0).alias("consistency_score"))
        )
        # Knowledge gained per unit of study time; Spark SQL (non-ANSI mode)
        # yields NULL for division by zero, which avg() then skips.
        study_efficiency = (
            study_data.withColumn(
                "efficiency", col("knowledge_gained") / col("study_duration")
            )
            .groupBy("student_id")
            .agg(avg("efficiency").alias("avg_efficiency"))
        )
        comprehensive_analysis = (
            avg_study_time.join(concentration_analysis, "student_id")
            .join(habit_consistency, "student_id")
            .join(study_efficiency, "student_id")
        )
        # Thresholds (180/120 minutes, 0.7/0.6/0.8 scores) are heuristics.
        study_habit_classification = comprehensive_analysis.withColumn(
            "habit_type",
            when(
                (col("avg_daily_study") > 180) & (col("avg_concentration") > 0.7),
                "优秀学习型",
            )
            .when(
                (col("avg_daily_study") > 120) & (col("consistency_score") > 0.6),
                "稳定学习型",
            )
            .when(col("avg_efficiency") > 0.8, "高效学习型")
            .otherwise("待提升型"),
        )
        result_df = study_habit_classification.toPandas()
    finally:
        # Bug fix: the session previously leaked when any step above raised.
        spark.stop()
    return JsonResponse({"status": "success", "data": result_df.to_dict("records")})
def analyze_health_status(request):
    """Score each student's sleep, exercise and mental health; return JSON.

    Reads the ``health_records`` table over JDBC, aggregates per-student
    health indicators, derives component scores (sleep / exercise / mental),
    an overall score and a textual health level, and returns all rows as
    ``{"status": "success", "data": [...]}``.

    Args:
        request: Django HTTP request (unused beyond routing).

    Returns:
        JsonResponse with one record per student.
    """
    spark = (
        SparkSession.builder
        .appName("HealthAnalysis")
        .config("spark.sql.adaptive.enabled", "true")
        .getOrCreate()
    )
    try:
        # NOTE(review): hard-coded JDBC credentials — move to settings/env.
        health_data = (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/student_db")
            .option("dbtable", "health_records")
            .option("user", "root")
            .option("password", "password")
            .load()
        )
        sleep_analysis = health_data.groupBy("student_id").agg(
            avg("sleep_duration").alias("avg_sleep"),
            avg("sleep_quality").alias("avg_sleep_quality"),
        )
        exercise_analysis = health_data.groupBy("student_id").agg(
            spark_sum("exercise_duration").alias("total_exercise"),
            avg("exercise_intensity").alias("avg_intensity"),
        )
        mental_health_scores = health_data.groupBy("student_id").agg(
            avg("stress_level").alias("avg_stress"),
            avg("mood_score").alias("avg_mood"),
            avg("anxiety_level").alias("avg_anxiety"),
        )
        physical_indicators = health_data.groupBy("student_id").agg(
            avg("heart_rate").alias("avg_heart_rate"),
            avg("blood_pressure_sys").alias("avg_bp_sys"),
            avg("blood_pressure_dia").alias("avg_bp_dia"),
        )
        lifestyle_habits = health_data.groupBy("student_id").agg(
            avg("screen_time").alias("avg_screen_time"),
            count(when(col("meal_regularity") == 1, 1)).alias("regular_meals"),
        )
        health_risk_assessment = mental_health_scores.withColumn(
            "mental_risk",
            when((col("avg_stress") > 7) | (col("avg_anxiety") > 6), "高风险")
            .when((col("avg_stress") > 5) | (col("avg_anxiety") > 4), "中等风险")
            .otherwise("低风险"),
        )
        # Bug fix: joining the full health_risk_assessment duplicated the
        # avg_stress/avg_mood/avg_anxiety columns already brought in by
        # mental_health_scores, making col("avg_stress") below ambiguous
        # (AnalysisException). Project to the new column only.
        comprehensive_health = (
            sleep_analysis.join(exercise_analysis, "student_id")
            .join(mental_health_scores, "student_id")
            .join(physical_indicators, "student_id")
            .join(lifestyle_habits, "student_id")
            .join(
                health_risk_assessment.select("student_id", "mental_risk"),
                "student_id",
            )
        )
        # Component scores: sleep banded at 7/6/5 hours, exercise at
        # 150/100 minutes. mental_score can go negative for extreme
        # stress/anxiety values — NOTE(review): consider clamping at 0.
        health_score_calculation = (
            comprehensive_health.withColumn(
                "sleep_score",
                when(col("avg_sleep") >= 7, 100)
                .when(col("avg_sleep") >= 6, 80)
                .when(col("avg_sleep") >= 5, 60)
                .otherwise(40),
            )
            .withColumn(
                "exercise_score",
                when(col("total_exercise") >= 150, 100)
                .when(col("total_exercise") >= 100, 80)
                .otherwise(50),
            )
            .withColumn(
                "mental_score",
                100 - (col("avg_stress") * 10 + col("avg_anxiety") * 8),
            )
        )
        overall_health_rating = health_score_calculation.withColumn(
            "overall_score",
            (col("sleep_score") + col("exercise_score") + col("mental_score")) / 3,
        ).withColumn(
            "health_level",
            when(col("overall_score") >= 80, "优秀")
            .when(col("overall_score") >= 60, "良好")
            .when(col("overall_score") >= 40, "一般")
            .otherwise("需关注"),
        )
        # Month-over-month stress trend. Lazily defined and never referenced
        # below, so Spark never executes it — TODO(review): surface or delete.
        health_trends = (
            health_data.withColumn("month", month(col("record_date")))
            .groupBy("student_id", "month")
            .agg(
                avg("stress_level").alias("monthly_stress"),
                avg("mood_score").alias("monthly_mood"),
            )
            .withColumn(
                "trend",
                when(
                    lag("monthly_stress").over(
                        Window.partitionBy("student_id").orderBy("month")
                    )
                    > col("monthly_stress"),
                    "改善",
                ).otherwise("稳定"),
            )
        )
        result_df = overall_health_rating.toPandas()
    finally:
        # Bug fix: the session previously leaked when any step above raised.
        spark.stop()
    return JsonResponse({"status": "success", "data": result_df.to_dict("records")})
def generate_comprehensive_profile(request):
    """Build a per-student profile from four MySQL tables and return JSON.

    Joins basic info, academic, behavioral and social-activity records;
    derives learning style, personality type, student cluster, improvement
    suggestions and dropout risk; and returns both per-student profiles and
    a per-cluster summary.

    Args:
        request: Django HTTP request (unused beyond routing).

    Returns:
        JsonResponse with ``profiles`` (one record per student row) and
        ``summary`` (aggregates grouped by ``student_type``).
    """
    spark = (
        SparkSession.builder
        .appName("ComprehensiveProfile")
        .config("spark.sql.adaptive.enabled", "true")
        .getOrCreate()
    )
    try:
        # NOTE(review): hard-coded JDBC credentials repeated four times —
        # move to settings/env and factor the reader into a helper.
        def _read_table(table_name):
            # Load one MySQL table through the shared JDBC connection options.
            return (
                spark.read.format("jdbc")
                .option("url", "jdbc:mysql://localhost:3306/student_db")
                .option("dbtable", table_name)
                .option("user", "root")
                .option("password", "password")
                .load()
            )

        student_basic = _read_table("student_basic_info")
        academic_data = _read_table("academic_records")
        behavioral_data = _read_table("behavior_records")
        social_data = _read_table("social_activities")
        academic_performance = academic_data.groupBy("student_id").agg(
            avg("score").alias("avg_score"),
            avg("rank_in_class").alias("avg_rank"),
            count("*").alias("exam_count"),
        )
        behavioral_patterns = behavioral_data.groupBy("student_id").agg(
            avg("attendance_rate").alias("avg_attendance"),
            avg("assignment_completion").alias("avg_assignment"),
            avg("class_participation").alias("avg_participation"),
        )
        social_engagement = social_data.groupBy("student_id").agg(
            count("*").alias("activity_count"),
            avg("leadership_score").alias("avg_leadership"),
            avg("teamwork_score").alias("avg_teamwork"),
        )
        # Dominant modality wins; ties fall through to "动觉学习型".
        # NOTE(review): this is a row-level join of two raw tables — if either
        # table has several rows per student the profile join below fans out.
        learning_style_analysis = academic_data.join(
            behavioral_data, "student_id"
        ).withColumn(
            "learning_preference",
            when(
                (col("visual_score") > col("auditory_score"))
                & (col("visual_score") > col("kinesthetic_score")),
                "视觉学习型",
            )
            .when(
                (col("auditory_score") > col("visual_score"))
                & (col("auditory_score") > col("kinesthetic_score")),
                "听觉学习型",
            )
            .otherwise("动觉学习型"),
        )
        personality_traits = behavioral_data.withColumn(
            "personality_type",
            when(
                (col("extroversion_score") > 6) & (col("conscientiousness_score") > 7),
                "外向自律型",
            )
            .when(
                (col("extroversion_score") < 4) & (col("openness_score") > 6),
                "内向创新型",
            )
            .when(col("agreeableness_score") > 7, "友善合作型")
            .otherwise("平衡发展型"),
        )
        # Bug fix: joining the full learning_style_analysis / personality_traits
        # frames duplicated every raw behavioral column in the profile,
        # producing ambiguous references downstream. Keep only the derived
        # label columns.
        comprehensive_profile = (
            student_basic.join(academic_performance, "student_id")
            .join(behavioral_patterns, "student_id")
            .join(social_engagement, "student_id")
            .join(
                learning_style_analysis.select("student_id", "learning_preference"),
                "student_id",
            )
            .join(
                personality_traits.select("student_id", "personality_type"),
                "student_id",
            )
        )
        student_clustering = comprehensive_profile.withColumn(
            "student_type",
            when((col("avg_score") > 85) & (col("avg_attendance") > 0.9), "学霸型")
            .when((col("activity_count") > 5) & (col("avg_leadership") > 6), "领导型")
            .when((col("avg_teamwork") > 7) & (col("avg_participation") > 6), "合作型")
            .when(col("avg_score") < 60, "潜力型")
            .otherwise("均衡型"),
        )
        development_suggestions = student_clustering.withColumn(
            "improvement_areas",
            when(col("avg_attendance") < 0.8, "提高出勤率")
            .when(col("avg_assignment") < 0.7, "加强作业完成")
            .when(col("activity_count") < 2, "增加社团参与")
            .otherwise("保持现状"),
        ).withColumn(
            "strength_areas",
            when(col("avg_score") > 80, "学术优秀")
            .when(col("avg_leadership") > 6, "领导能力强")
            .when(col("avg_teamwork") > 7, "团队协作佳")
            .otherwise("待发掘"),
        )
        risk_prediction = comprehensive_profile.withColumn(
            "dropout_risk",
            when((col("avg_score") < 50) | (col("avg_attendance") < 0.6), "高风险")
            .when((col("avg_score") < 70) | (col("avg_attendance") < 0.8), "中等风险")
            .otherwise("低风险"),
        )
        # Bug fix: risk_prediction shares almost every column with
        # development_suggestions (both derive from comprehensive_profile),
        # so the original join made col("exam_count") below ambiguous
        # (AnalysisException). Project to the new column only.
        final_profile = (
            development_suggestions.join(
                risk_prediction.select("student_id", "dropout_risk"),
                "student_id",
            )
            # Heuristic: 20 combined exam + activity records ≙ a "complete" profile.
            .withColumn(
                "profile_completeness",
                (col("exam_count") + col("activity_count")) / 20.0,
            )
            .withColumn(
                "data_reliability",
                when(col("profile_completeness") > 0.8, "高")
                .when(col("profile_completeness") > 0.5, "中")
                .otherwise("低"),
            )
        )
        result_df = final_profile.toPandas()
        # Cluster-level summary: head count plus mean score/attendance per type.
        profile_summary = (
            result_df.groupby("student_type")
            .agg({"student_id": "count", "avg_score": "mean", "avg_attendance": "mean"})
            .reset_index()
        )
    finally:
        # Bug fix: the session previously leaked when any step above raised.
        spark.stop()
    return JsonResponse(
        {
            "status": "success",
            "profiles": result_df.to_dict("records"),
            "summary": profile_summary.to_dict("records"),
        }
    )
六、部分文档展示
七、END
💕💕文末获取源码联系计算机编程果茶熊