一、个人简介
- 💖💖作者:计算机编程果茶熊
- 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我!
- 💛💛想说的话:感谢大家的关注与支持!
- 💜💜
- 网站实战项目
- 安卓/小程序实战项目
- 大数据实战项目
- 计算机毕业设计选题
- 💕💕文末获取源码联系计算机编程果茶熊
二、系统介绍
- 大数据框架:Hadoop+Spark(Hive需要定制修改)
- 开发语言:Java+Python(两个版本都支持)
- 数据库:MySQL
- 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持)
- 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery
本系统《基于大数据的慢性肾病数据可视化分析系统》采用Hadoop+Spark大数据处理框架,结合Python开发语言和Django后端框架构建而成。系统通过HDFS分布式存储海量慢性肾病患者数据,利用Spark SQL进行高效数据处理和分析计算,前端采用Vue+ElementUI+Echarts技术栈实现交互式数据可视化展示。系统核心功能涵盖慢性肾病数据管理、血液生化指标分析、临床模式分析、疾病流行率分析、疾病进展分析、肾功能分析以及多指标综合分析等模块。通过Pandas和NumPy进行数据预处理,结合Spark的分布式计算能力,能够快速处理大规模医疗数据集,为医疗研究人员和临床医生提供直观的数据洞察和分析结果。系统支持多维度数据筛选、实时图表展示和趋势预测,帮助用户深入理解慢性肾病的发病规律和治疗效果评估。
三、基于大数据的慢性肾病数据可视化分析系统-视频解说
GitHub热门大数据项目:基于Hadoop+Spark的慢性肾病数据可视化分析系统技术解析
四、基于大数据的慢性肾病数据可视化分析系统-功能展示
五、基于大数据的慢性肾病数据可视化分析系统-代码展示
import json

import numpy as np
import pandas as pd
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
    asc,
    avg,
    col,
    count,
    current_date,
    date_sub,
    datediff,
    desc,
    lag,
    max,
    min,
    when,
)
from pyspark.sql.types import (
    DateType,
    FloatType,
    IntegerType,
    StringType,
    StructField,
    StructType,
)
from pyspark.sql.window import Window
# Module-level SparkSession shared by all analysis views below.
# Adaptive query execution (AQE) is enabled so Spark tunes shuffle
# partition counts at runtime instead of using a fixed number.
spark = SparkSession.builder.appName("ChronicKidneyDiseaseAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
@csrf_exempt
def blood_biochemical_analysis(request):
    """Aggregate per-patient blood-biochemistry indicators and return JSON.

    POST body (JSON):
        patient_ids   -- optional list restricting analysis to those patients
        analysis_type -- 'creatinine' | 'urea' | anything else (comprehensive)
        time_range    -- look-back window in days (default 30)

    Returns per-patient aggregates, a creatinine status / risk classification,
    daily trend data and a correlation matrix of the average indicators.
    Responds 405 to non-POST requests (the original fell through to None).
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'error', 'message': 'POST required'}, status=405)
    data = json.loads(request.body)
    patient_ids = data.get('patient_ids', [])
    analysis_type = data.get('analysis_type', 'comprehensive')
    time_range = int(data.get('time_range', 30))
    # NOTE(review): JDBC credentials are hard-coded; move to settings/env.
    df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/kidney_disease").option("dbtable", "blood_biochemical_data").option("user", "root").option("password", "password").load()
    if patient_ids:
        df = df.filter(col("patient_id").isin(patient_ids))
    # BUG FIX: the original compared test_date against itself minus a constant,
    # which is not a time-window filter at all. Restrict to the last
    # `time_range` days relative to today instead.
    df = df.filter(col("test_date") >= date_sub(current_date(), time_range))
    # Always aggregate all four averages so the risk classification and
    # correlation steps below have the columns they need regardless of
    # analysis_type (the original raised AnalysisException for the
    # 'creatinine' and 'urea' branches, which lacked avg_urea etc.).
    base_aggs = [
        avg("creatinine").alias("avg_creatinine"),
        avg("urea_nitrogen").alias("avg_urea"),
        avg("hemoglobin").alias("avg_hemoglobin"),
        avg("albumin").alias("avg_albumin"),
    ]
    if analysis_type == 'creatinine':
        result_df = df.groupBy("patient_id").agg(*base_aggs, max("creatinine").alias("max_creatinine"), min("creatinine").alias("min_creatinine"), count("*").alias("test_count"))
    elif analysis_type == 'urea':
        result_df = df.groupBy("patient_id").agg(*base_aggs, max("urea_nitrogen").alias("max_urea"), min("urea_nitrogen").alias("min_urea"), count("*").alias("test_count"))
    else:
        result_df = df.groupBy("patient_id").agg(*base_aggs, count("*").alias("total_tests"))
    # Clinical thresholds below (1.2 / 0.6 mg/dL creatinine, etc.) are taken
    # from the original code -- presumably standard reference ranges; confirm
    # with the medical domain owner.
    abnormal_df = result_df.withColumn("creatinine_status", when(col("avg_creatinine") > 1.2, "High").when(col("avg_creatinine") < 0.6, "Low").otherwise("Normal"))
    abnormal_df = abnormal_df.withColumn("risk_level", when((col("avg_creatinine") > 2.0) | (col("avg_urea") > 50), "High Risk").when((col("avg_creatinine") > 1.5) | (col("avg_urea") > 35), "Medium Risk").otherwise("Low Risk"))
    pandas_result = abnormal_df.toPandas()
    statistics = pandas_result.describe().to_dict()
    risk_distribution = pandas_result['risk_level'].value_counts().to_dict()
    trend_data = df.groupBy("test_date").agg(avg("creatinine").alias("daily_avg_creatinine"), avg("urea_nitrogen").alias("daily_avg_urea")).orderBy("test_date").toPandas()
    # Correlate only the columns actually present (defensive; all four exist
    # after the base_aggs fix above).
    corr_cols = [c for c in ('avg_creatinine', 'avg_urea', 'avg_hemoglobin', 'avg_albumin') if c in pandas_result.columns]
    correlation_matrix = pandas_result[corr_cols].corr().to_dict()
    return JsonResponse({'status': 'success', 'statistics': statistics, 'risk_distribution': risk_distribution, 'trend_data': trend_data.to_dict('records'), 'correlation_matrix': correlation_matrix, 'patient_count': len(pandas_result)})
@csrf_exempt
def disease_progression_analysis(request):
    """Analyse CKD stage distribution, stage transitions and risk factors.

    POST body (JSON):
        stage_criteria -- 'gfr' (KDIGO-style GFR staging) or anything else
                          (coarse creatinine-based staging)
        time_interval  -- accepted for API compatibility; currently unused
        patient_filter -- optional dict with 'age_range' [min, max] and/or
                          'gender' to pre-filter the joined patient data

    Returns stage-transition counts, per-stage survival statistics,
    risk-factor breakdowns and per-stage mortality counts as JSON.
    Responds 405 to non-POST requests.
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'error', 'message': 'POST required'}, status=405)
    data = json.loads(request.body)
    stage_criteria = data.get('stage_criteria', 'gfr')
    time_interval = data.get('time_interval', 'monthly')  # unused; kept for API compat
    patient_filter = data.get('patient_filter', {})
    # NOTE(review): JDBC credentials are hard-coded; move to settings/env.
    progression_df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/kidney_disease").option("dbtable", "disease_progression").option("user", "root").option("password", "password").load()
    patient_df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/kidney_disease").option("dbtable", "patient_info").option("user", "root").option("password", "password").load()
    joined_df = progression_df.join(patient_df, "patient_id", "inner")
    if patient_filter.get('age_range'):
        min_age, max_age = patient_filter['age_range']
        joined_df = joined_df.filter((col("age") >= min_age) & (col("age") <= max_age))
    if patient_filter.get('gender'):
        joined_df = joined_df.filter(col("gender") == patient_filter['gender'])
    if stage_criteria == 'gfr':
        # KDIGO GFR cut-offs: >=90, >=60, >=45, >=30, >=15, else stage 5.
        stage_df = joined_df.withColumn("ckd_stage", when(col("gfr") >= 90, "Stage 1").when(col("gfr") >= 60, "Stage 2").when(col("gfr") >= 45, "Stage 3a").when(col("gfr") >= 30, "Stage 3b").when(col("gfr") >= 15, "Stage 4").otherwise("Stage 5"))
    else:
        stage_df = joined_df.withColumn("ckd_stage", when(col("creatinine") <= 1.2, "Early").when(col("creatinine") <= 2.0, "Moderate").otherwise("Advanced"))
    # Per (patient, stage): record count plus first/last observation dates.
    progression_stats = stage_df.groupBy("patient_id", "ckd_stage").agg(count("*").alias("stage_duration"), avg("gfr").alias("avg_gfr"), min("record_date").alias("stage_start"), max("record_date").alias("stage_end"))
    stage_transition = stage_df.select("patient_id", "ckd_stage", "record_date").orderBy("patient_id", "record_date")
    # BUG FIX: Window and lag were used without being imported (NameError);
    # they are now imported at the top of the file.
    window_spec = Window.partitionBy("patient_id").orderBy("record_date")
    transition_df = stage_transition.withColumn("previous_stage", lag("ckd_stage").over(window_spec)).withColumn("stage_changed", when(col("ckd_stage") != col("previous_stage"), 1).otherwise(0))
    progression_rate = transition_df.filter(col("stage_changed") == 1).groupBy("ckd_stage", "previous_stage").count().withColumnRenamed("count", "transition_count")
    # BUG FIX: stage_start/stage_end only exist on progression_stats, not on
    # stage_df; aggregate that frame (mean of per-patient avg_gfr per stage).
    survival_analysis = progression_stats.groupBy("ckd_stage").agg(avg("avg_gfr").alias("mean_gfr"), count("patient_id").alias("patient_count"), avg(datediff(col("stage_end"), col("stage_start"))).alias("avg_stage_duration"))
    # BUG FIX: ckd_stage does not exist on joined_df; group the staged frame.
    risk_factors = stage_df.groupBy("diabetes", "hypertension", "ckd_stage").agg(count("patient_id").alias("patient_count"), avg("gfr").alias("avg_gfr"))
    progression_pandas = progression_rate.toPandas()
    survival_pandas = survival_analysis.toPandas()
    risk_pandas = risk_factors.toPandas()
    # assumes a 'status' column with value "deceased" exists -- TODO confirm
    mortality_risk = stage_df.filter(col("status") == "deceased").groupBy("ckd_stage").count().toPandas()
    return JsonResponse({'status': 'success', 'progression_rates': progression_pandas.to_dict('records'), 'survival_analysis': survival_pandas.to_dict('records'), 'risk_factors': risk_pandas.to_dict('records'), 'mortality_risk': mortality_risk.to_dict('records'), 'total_patients': stage_df.select("patient_id").distinct().count()})
@csrf_exempt
def multi_indicator_analysis(request):
    """Cross-indicator analysis: correlation/regression, outliers, clustering.

    POST body (JSON):
        indicators       -- list of numeric lab columns
                            (default creatinine, urea_nitrogen, hemoglobin)
        analysis_method  -- 'correlation' | 'regression' | other (correlation)
        clustering_params -- dict, supports 'n_clusters' (default 3)

    Returns the chosen correlation/regression result, IQR-based outlier
    counts, demographic pattern aggregates, k-means cluster means and a
    risk-score distribution. Responds 405 to non-POST requests.
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'error', 'message': 'POST required'}, status=405)
    data = json.loads(request.body)
    indicators = data.get('indicators', ['creatinine', 'urea_nitrogen', 'hemoglobin'])
    analysis_method = data.get('analysis_method', 'correlation')
    clustering_params = data.get('clustering_params', {'n_clusters': 3})
    # NOTE(review): JDBC credentials are hard-coded; move to settings/env.
    lab_df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/kidney_disease").option("dbtable", "laboratory_results").option("user", "root").option("password", "password").load()
    clinical_df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/kidney_disease").option("dbtable", "clinical_data").option("user", "root").option("password", "password").load()
    combined_df = lab_df.join(clinical_df, "patient_id", "inner")
    selected_columns = ["patient_id"] + indicators + ["age", "gender", "diagnosis_date"]
    analysis_df = combined_df.select(*selected_columns).na.drop()
    # BUG FIX: convert once up front -- the original left pandas_df undefined
    # on the 'regression' branch and crashed later at the clustering step.
    pandas_df = analysis_df.toPandas()
    if analysis_method == 'correlation':
        correlation_result = pandas_df[indicators].corr().to_dict()
    elif analysis_method == 'regression':
        # Pearson correlation of each feature column against the target
        # (first indicator) via the joint correlation matrix's last column.
        feature_cols = indicators[1:]
        target_col = indicators[0]
        X = pandas_df[feature_cols]
        y = pandas_df[target_col]
        correlation_result = np.corrcoef(X.T, y)[:-1, -1].tolist()
    else:
        correlation_result = pandas_df[indicators].corr().to_dict()
    # IQR-based outlier flags (1.5 * IQR fences) computed with Spark's
    # approximate quantiles (1% relative error).
    outlier_detection = analysis_df.select(*indicators)
    for indicator in indicators:
        quantiles = outlier_detection.approxQuantile(indicator, [0.25, 0.75], 0.01)
        if len(quantiles) == 2:
            q1, q3 = quantiles
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            outlier_detection = outlier_detection.withColumn(f"{indicator}_outlier", when((col(indicator) < lower_bound) | (col(indicator) > upper_bound), 1).otherwise(0))
    # BUG FIX: only select flag columns that were actually created above
    # (approxQuantile can return fewer than two values on empty data).
    outlier_cols = [f"{ind}_outlier" for ind in indicators if f"{ind}_outlier" in outlier_detection.columns]
    outlier_stats = outlier_detection.select(*outlier_cols).toPandas().sum().to_dict() if outlier_cols else {}
    # assumes combined_df carries an 'age_group' column -- TODO confirm schema
    pattern_analysis = combined_df.groupBy("gender", "age_group").agg(*[avg(ind).alias(f"avg_{ind}") for ind in indicators], count("patient_id").alias("patient_count"))
    pattern_pandas = pattern_analysis.toPandas()
    from sklearn.cluster import KMeans
    kmeans = KMeans(n_clusters=clustering_params.get('n_clusters', 3), random_state=42)
    cluster_labels = kmeans.fit_predict(pandas_df[indicators].values)
    pandas_df['cluster'] = cluster_labels
    cluster_summary = pandas_df.groupby('cluster')[indicators].mean().to_dict()
    # BUG FIX: the original hard-coded three column names and raised KeyError
    # whenever the caller chose different indicators; fall back to the mean
    # of the selected indicators in that case.
    risk_scoring = pandas_df.copy()
    if {'creatinine', 'urea_nitrogen', 'hemoglobin'}.issubset(risk_scoring.columns):
        risk_scoring['risk_score'] = (risk_scoring['creatinine'] * 0.4 + risk_scoring['urea_nitrogen'] * 0.3 + (100 - risk_scoring['hemoglobin']) * 0.3)
    else:
        risk_scoring['risk_score'] = risk_scoring[indicators].mean(axis=1)
    risk_distribution = risk_scoring['risk_score'].describe().to_dict()
    return JsonResponse({'status': 'success', 'correlation_analysis': correlation_result, 'outlier_statistics': outlier_stats, 'pattern_analysis': pattern_pandas.to_dict('records'), 'cluster_analysis': cluster_summary, 'risk_distribution': risk_distribution, 'total_patients_analyzed': len(pandas_df)})
六、基于大数据的慢性肾病数据可视化分析系统-文档展示
七、END
- 💛💛想说的话:感谢大家的关注与支持!
- 💜💜
- 网站实战项目
- 安卓/小程序实战项目
- 大数据实战项目
- 计算机毕业设计选题
- 💕💕文末获取源码联系计算机编程果茶熊