前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
《睡眠中人体压力数据可视化分析系统》是一个基于大数据技术的健康监测分析平台,采用Hadoop+Spark分布式计算框架处理海量睡眠监测数据,通过Python语言结合Django后端框架和Vue+ElementUI+Echarts前端技术栈构建完整的数据分析解决方案。系统核心功能涵盖压力水平指标均值分析、生理指标关联度分析、睡眠压力水平分布分析、综合健康指数趋势分析以及可视化大屏展示。系统能够对用户睡眠期间的心率、血压、体温等生理参数进行实时采集和深度挖掘,运用Spark SQL进行高效数据查询,结合Pandas和NumPy进行统计分析,将复杂的生理数据转化为直观的图表和趋势报告。系统不仅为用户提供个性化的睡眠质量评估和健康建议,同时也为医疗机构和研究人员提供科学的数据支撑和决策依据。
三.系统功能演示
四.系统界面展示
五.系统源码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views import View
import json
from datetime import datetime, timedelta
# Module-level SparkSession shared by all analysis views below.
# Adaptive query execution (AQE) is enabled so Spark can coalesce shuffle
# partitions at runtime instead of using a fixed partition count.
spark = SparkSession.builder.appName("SleepPressureAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
class PressureLevelAnalysisView(View):
    """POST endpoint: per-sleep-stage pressure statistics for one user.

    Expects a JSON body with ``user_id``, ``start_date`` and ``end_date``.
    Returns the pressure-level distribution, hourly stress/vitals trends,
    a correlation matrix of core vitals and a per-user risk assessment.
    """

    def post(self, request):
        payload = json.loads(request.body)
        user_id = payload.get('user_id')
        start_date = payload.get('start_date')
        end_date = payload.get('end_date')
        # Load the raw sleep-pressure table from MySQL over JDBC.
        # NOTE(review): credentials are hard-coded — move them to Django
        # settings / environment variables.
        raw_df = (spark.read.format("jdbc")
                  .option("url", "jdbc:mysql://localhost:3306/sleep_db")
                  .option("dbtable", "sleep_pressure_data")
                  .option("user", "root")
                  .option("password", "password")
                  .load())
        scoped_df = raw_df.filter(
            (col("user_id") == user_id)
            & (col("record_date") >= start_date)
            & (col("record_date") <= end_date)
        )
        # Aggregate vitals per sleep stage.
        stage_stats = scoped_df.groupBy("sleep_stage").agg(
            avg("heart_rate").alias("avg_heart_rate"),
            avg("blood_pressure_sys").alias("avg_bp_sys"),
            avg("blood_pressure_dia").alias("avg_bp_dia"),
            avg("stress_index").alias("avg_stress"),
            stddev("heart_rate").alias("std_heart_rate"),
            count("*").alias("record_count"),
        )
        # Bucket each stage into one of three pressure levels by mean stress.
        leveled_stats = stage_stats.withColumn(
            "pressure_level",
            when(col("avg_stress") < 30, "低压力")
            .when(col("avg_stress") < 60, "中压力")
            .otherwise("高压力"),
        )
        level_distribution = leveled_stats.groupBy("pressure_level").agg(
            sum("record_count").alias("total_records"),
            avg("avg_heart_rate").alias("mean_heart_rate"),
            avg("avg_stress").alias("mean_stress_index"),
        )
        distribution_pd = level_distribution.toPandas()
        # Hour-of-night trend of stress, heart rate and body temperature.
        hourly_stats = scoped_df.withColumn("hour", hour("timestamp")).groupBy("hour").agg(
            avg("stress_index").alias("hourly_stress"),
            avg("heart_rate").alias("hourly_hr"),
            avg("body_temp").alias("hourly_temp"),
        )
        hourly_pd = hourly_stats.orderBy("hour").toPandas()
        # Pairwise Pearson correlations between the core vitals (via pandas).
        vitals_corr = scoped_df.select(
            "heart_rate", "blood_pressure_sys", "stress_index", "body_temp"
        ).toPandas().corr()
        # Weighted composite risk score, aggregated per user.
        risk_summary = scoped_df.withColumn(
            "risk_score",
            col("stress_index") * 0.4
            + col("heart_rate") / 100 * 0.3
            + col("blood_pressure_sys") / 140 * 0.3,
        ).groupBy("user_id").agg(
            avg("risk_score").alias("avg_risk"),
            max("risk_score").alias("max_risk"),
        )
        return JsonResponse({
            "pressure_distribution": distribution_pd.to_dict('records'),
            "hourly_trends": hourly_pd.to_dict('records'),
            "correlation_data": vitals_corr.to_dict(),
            "risk_assessment": risk_summary.toPandas().to_dict('records'),
        })
class PhysiologicalCorrelationView(View):
    """POST endpoint: cross-indicator correlation analysis for a user group.

    Expects a JSON body with ``user_ids`` (list) and an optional ``period``
    (look-back window in days, default 30).  Returns pairwise correlations
    between baseline-normalized vitals, a per-user sleep-quality impact
    summary and average vitals per time-of-night period.
    """

    def post(self, request):
        data = json.loads(request.body)
        user_ids = data.get('user_ids', [])
        analysis_period = data.get('period', 30)
        end_date = datetime.now()
        start_date = end_date - timedelta(days=analysis_period)
        # NOTE(review): JDBC credentials are hard-coded — move them to Django
        # settings / environment variables.
        physiological_df = (spark.read.format("jdbc")
                            .option("url", "jdbc:mysql://localhost:3306/sleep_db")
                            .option("dbtable", "physiological_indicators")
                            .option("user", "root")
                            .option("password", "password")
                            .load())
        target_df = physiological_df.filter(
            (col("user_id").isin(user_ids))
            & (col("measurement_time") >= start_date)
            & (col("measurement_time") <= end_date)
        )
        # Normalize each vital around a typical resting baseline so the
        # correlation pairs below are on comparable scales.
        normalized_df = (target_df
                         .withColumn("norm_heart_rate", (col("heart_rate") - 60) / 40)
                         .withColumn("norm_blood_pressure", (col("systolic_pressure") - 120) / 20)
                         .withColumn("norm_oxygen_saturation", (col("oxygen_saturation") - 95) / 5)
                         .withColumn("norm_stress_hormone", (col("cortisol_level") - 10) / 15))
        correlation_pairs = [
            ("norm_heart_rate", "norm_stress_hormone"),
            ("norm_blood_pressure", "norm_stress_hormone"),
            ("norm_oxygen_saturation", "norm_heart_rate"),
            ("norm_heart_rate", "norm_blood_pressure"),
        ]
        correlation_results = []
        for left, right in correlation_pairs:
            pair_pd = (normalized_df.select(left, right)
                       .filter((col(left).isNotNull()) & (col(right).isNotNull()))
                       .toPandas())
            coeff = pair_pd[left].corr(pair_pd[right])
            # corr() yields NaN with fewer than two valid rows; NaN is not
            # valid JSON, so report 0.0 (no measurable correlation) instead.
            correlation_results.append({
                "indicator_1": left,
                "indicator_2": right,
                "correlation": float(coeff) if not np.isnan(coeff) else 0.0,
            })
        # BUG FIX: the original derived this frame from target_df, which does
        # not carry the norm_* columns, so the aggregation below failed on
        # "norm_stress_hormone" with an AnalysisException.  Derive it from
        # normalized_df instead so the column exists.
        sleep_quality_impact = (normalized_df
                                .withColumn("sleep_efficiency",
                                            col("deep_sleep_duration") / col("total_sleep_duration"))
                                .withColumn("arousal_frequency",
                                            col("awakening_count") / col("total_sleep_duration") * 60))
        quality_correlation = sleep_quality_impact.groupBy("user_id").agg(
            avg("sleep_efficiency").alias("avg_efficiency"),
            avg("arousal_frequency").alias("avg_arousal"),
            avg("norm_stress_hormone").alias("avg_stress_level"),
            corr("sleep_efficiency", "norm_stress_hormone").alias("efficiency_stress_corr"),
        )
        # Average vitals per time-of-night bucket (by hour of measurement).
        physiological_patterns = (target_df
                                  .withColumn("time_period",
                                              when(hour("measurement_time") < 6, "深度睡眠期")
                                              .when(hour("measurement_time") < 8, "浅睡眠期")
                                              .otherwise("清醒期"))
                                  .groupBy("time_period", "user_id")
                                  .agg(avg("heart_rate").alias("period_avg_hr"),
                                       avg("systolic_pressure").alias("period_avg_bp"),
                                       avg("cortisol_level").alias("period_avg_cortisol")))
        pattern_analysis = physiological_patterns.groupBy("time_period").agg(
            avg("period_avg_hr").alias("overall_avg_hr"),
            avg("period_avg_bp").alias("overall_avg_bp"),
            avg("period_avg_cortisol").alias("overall_avg_cortisol"),
        )
        return JsonResponse({
            "correlations": correlation_results,
            "sleep_quality_impact": quality_correlation.toPandas().to_dict('records'),
            "physiological_patterns": pattern_analysis.toPandas().to_dict('records'),
        })
class HealthIndexTrendView(View):
    """POST endpoint: composite health-index trend analysis for one user.

    Expects a JSON body with ``user_id`` and an optional ``days`` look-back
    window (default 90).  Computes a daily composite health index from
    cardiovascular, sleep-quality and stress sub-scores, then returns weekly
    and monthly aggregates, a category distribution, a linear trend slope,
    a 30-day forecast and rule-based improvement suggestions.
    """

    def post(self, request):
        data = json.loads(request.body)
        user_id = data.get('user_id')
        trend_days = data.get('days', 90)
        end_date = datetime.now()
        start_date = end_date - timedelta(days=trend_days)
        # NOTE(review): JDBC credentials are hard-coded — move them to Django
        # settings / environment variables.
        health_metrics_df = (spark.read.format("jdbc")
                             .option("url", "jdbc:mysql://localhost:3306/sleep_db")
                             .option("dbtable", "comprehensive_health_data")
                             .option("user", "root")
                             .option("password", "password")
                             .load())
        user_health_df = health_metrics_df.filter(
            (col("user_id") == user_id)
            & (col("record_date") >= start_date)
            & (col("record_date") <= end_date)
        )
        # Composite index = cardiovascular + sleep-quality + stress sub-scores.
        # The weights and baselines (140 mmHg, 8 awakenings, ...) are domain
        # constants inherited from the original implementation — TODO confirm
        # against the product's scoring specification.
        daily_health_index = (user_health_df
                              .withColumn("cardiovascular_score",
                                          (100 - col("resting_heart_rate")) * 0.3
                                          + (140 - col("systolic_bp")) / 140 * 30)
                              .withColumn("sleep_quality_score",
                                          col("deep_sleep_ratio") * 40
                                          + (8 - col("awakening_times")) * 5)
                              .withColumn("stress_score",
                                          (100 - col("cortisol_level")) * 0.4)
                              .withColumn("overall_health_index",
                                          col("cardiovascular_score")
                                          + col("sleep_quality_score")
                                          + col("stress_score")))
        weekly_trends = daily_health_index.withColumn(
            "week_number", weekofyear("record_date")
        ).groupBy("week_number").agg(
            avg("overall_health_index").alias("weekly_avg_index"),
            avg("cardiovascular_score").alias("weekly_cardio"),
            avg("sleep_quality_score").alias("weekly_sleep"),
            avg("stress_score").alias("weekly_stress"),
            min("overall_health_index").alias("weekly_min"),
            max("overall_health_index").alias("weekly_max"),
        )
        monthly_comparison = daily_health_index.withColumn(
            "month", month("record_date")
        ).groupBy("month").agg(
            avg("overall_health_index").alias("monthly_avg"),
            stddev("overall_health_index").alias("monthly_std"),
            count("*").alias("data_points"),
        )
        health_category_distribution = daily_health_index.withColumn(
            "health_category",
            when(col("overall_health_index") >= 80, "优秀")
            .when(col("overall_health_index") >= 60, "良好")
            .when(col("overall_health_index") >= 40, "一般")
            .otherwise("需改善"),
        ).groupBy("health_category").agg(
            count("*").alias("category_count"),
            avg("overall_health_index").alias("category_avg_score"),
        )
        trend_prediction = daily_health_index.withColumn(
            "day_sequence", datediff("record_date", lit(start_date))
        ).select("day_sequence", "overall_health_index")
        # BUG FIX: toPandas() does not guarantee row order, so iloc[-1] was not
        # necessarily the most recent observation.  Sort by day_sequence first.
        trend_pandas = (trend_prediction.toPandas()
                        .sort_values("day_sequence")
                        .reset_index(drop=True))
        # BUG FIX: np.polyfit raises on fewer than two points; fall back to a
        # flat (zero-slope) trend when there is not enough history.
        if len(trend_pandas) >= 2:
            slope = np.polyfit(trend_pandas['day_sequence'],
                               trend_pandas['overall_health_index'], 1)[0]
        else:
            slope = 0.0
        prediction_days = 30
        if trend_pandas.empty:
            future_trend = []
        else:
            baseline = trend_pandas['overall_health_index'].iloc[-1]
            future_trend = [{"day": i, "predicted_index": baseline + slope * i}
                            for i in range(1, prediction_days + 1)]
        improvement_suggestions = []
        latest_metrics = daily_health_index.orderBy(desc("record_date")).first()
        # BUG FIX: first() returns None when the filter matched no rows; the
        # original crashed on the subscripting below in that case.
        if latest_metrics is not None:
            if latest_metrics['cardiovascular_score'] < 25:
                improvement_suggestions.append("建议增加有氧运动,改善心血管健康")
            if latest_metrics['sleep_quality_score'] < 30:
                improvement_suggestions.append("建议调整睡眠环境,提高深度睡眠比例")
            if latest_metrics['stress_score'] < 25:
                improvement_suggestions.append("建议进行压力管理训练,降低皮质醇水平")
        return JsonResponse({
            "weekly_trends": weekly_trends.orderBy("week_number").toPandas().to_dict('records'),
            "monthly_comparison": monthly_comparison.toPandas().to_dict('records'),
            "health_distribution": health_category_distribution.toPandas().to_dict('records'),
            "trend_slope": float(slope),
            "future_predictions": future_trend,
            "improvement_suggestions": improvement_suggestions,
        })
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨