前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
医院急诊患者行为分析系统是一套基于大数据技术的医疗数据智能分析平台。系统采用Hadoop+Spark分布式计算框架作为数据处理引擎,通过HDFS实现海量急诊数据的存储与管理。后端使用Django框架构建RESTful API接口,结合Spark SQL进行复杂的数据查询与统计分析,利用Pandas和NumPy完成数据清洗、特征提取等预处理工作。前端采用Vue+ElementUI搭建交互界面,通过Echarts实现患者特征、急诊行为、满意度评价、医疗资源利用等多维度数据的可视化展示。系统核心功能涵盖用户权限管理、急诊患者基础数据管理、患者特征画像分析、急诊就医行为模式挖掘、满意度评估、医疗资源配置分析以及多维度综合分析看板,并提供可视化大屏用于数据集中展示。通过对急诊患者的就诊时间分布、疾病类型、等候时长、诊疗流程、资源占用等关键指标进行深度分析,为医院管理者提供数据支持,辅助优化急诊科室资源配置与服务流程。
三.系统功能演示
四.系统界面展示
五.系统源码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, when, hour, date_format, unix_timestamp, lag, dense_rank
from pyspark.sql.window import Window
from django.http import JsonResponse
from django.views import View
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import json
# Shared SparkSession used by every analysis view; built once at import time.
# NOTE(review): warehouse dir points at the Hive default path even though Hive
# is not used by this project — confirm the config is still required.
_builder = (
    SparkSession.builder
    .appName("EmergencyPatientAnalysis")
    .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
    .config("spark.executor.memory", "4g")
    .config("spark.driver.memory", "2g")
)
spark = _builder.getOrCreate()
class PatientBehaviorAnalysis(View):
    """Analyze emergency-patient behavior over a date range.

    POST body (JSON): ``{"start_date": ..., "end_date": ...}``.
    Returns hourly admission distribution, top-10 disease types, behavior
    by age group, repeat-visit rate, average revisit interval (days) and
    the list of peak admission hours.
    """

    def post(self, request):
        params = json.loads(request.body)
        start_date = params.get('start_date')
        end_date = params.get('end_date')
        # NOTE(review): JDBC credentials are hard-coded; move them to Django
        # settings / environment variables.
        patient_df = (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/emergency_db")
            .option("dbtable", "patient_records")
            .option("user", "root")
            .option("password", "password")
            .load()
        )
        filtered_df = patient_df.filter(
            (col("admission_time") >= start_date) & (col("admission_time") <= end_date)
        )
        filtered_df = filtered_df.withColumn("admission_hour", hour(col("admission_time")))
        # Waiting time in minutes between admission and start of treatment.
        filtered_df = filtered_df.withColumn(
            "wait_minutes",
            (unix_timestamp(col("treatment_start_time")) - unix_timestamp(col("admission_time"))) / 60,
        )
        hourly_distribution = (
            filtered_df.groupBy("admission_hour")
            .agg(count("patient_id").alias("patient_count"),
                 avg("wait_minutes").alias("avg_wait_time"))
            .orderBy("admission_hour")
        )
        disease_distribution = (
            filtered_df.groupBy("disease_type")
            .agg(count("patient_id").alias("case_count"),
                 avg("treatment_duration").alias("avg_duration"))
            .orderBy(col("case_count").desc())
            .limit(10)
        )
        # Bucket patients into the four age groups used by the dashboard.
        age_groups = filtered_df.withColumn(
            "age_group",
            when(col("age") < 18, "儿童")
            .when((col("age") >= 18) & (col("age") < 40), "青年")
            .when((col("age") >= 40) & (col("age") < 60), "中年")
            .otherwise("老年"),
        )
        age_behavior = (
            age_groups.groupBy("age_group")
            .agg(count("patient_id").alias("total_patients"),
                 avg("wait_minutes").alias("avg_wait"),
                 avg("satisfaction_score").alias("avg_satisfaction"))
            .orderBy("age_group")
        )
        repeat_patients = (
            filtered_df.groupBy("patient_id")
            .agg(count("visit_id").alias("visit_count"))
            .filter(col("visit_count") > 1)
        )
        # BUGFIX: guard against ZeroDivisionError when the date range matches
        # no patients at all (distinct count == 0).
        distinct_patient_count = filtered_df.select("patient_id").distinct().count()
        if distinct_patient_count > 0:
            repeat_rate = (repeat_patients.count() / distinct_patient_count) * 100
        else:
            repeat_rate = 0
        # Per-patient visit sequence to measure time between consecutive visits.
        window_spec = Window.partitionBy("patient_id").orderBy("admission_time")
        patient_sequence = filtered_df.withColumn(
            "prev_visit_time", lag("admission_time").over(window_spec)
        )
        patient_sequence = patient_sequence.withColumn(
            "days_between_visits",
            (unix_timestamp(col("admission_time")) - unix_timestamp(col("prev_visit_time"))) / 86400,
        )
        avg_revisit_interval = (
            patient_sequence.filter(col("days_between_visits").isNotNull())
            .agg(avg("days_between_visits").alias("avg_interval"))
            .collect()[0]["avg_interval"]
        )
        # Peak hours = hours whose patient count exceeds the hourly average.
        mean_hourly_count = hourly_distribution.agg(avg("patient_count")).collect()[0][0]
        peak_hours = (
            hourly_distribution.filter(col("patient_count") > mean_hourly_count)
            .select("admission_hour")
            .rdd.flatMap(lambda x: x)
            .collect()
        )
        result_data = {
            "hourly_distribution": hourly_distribution.toPandas().to_dict(orient='records'),
            "disease_distribution": disease_distribution.toPandas().to_dict(orient='records'),
            "age_behavior": age_behavior.toPandas().to_dict(orient='records'),
            "repeat_visit_rate": round(repeat_rate, 2),
            # avg_revisit_interval is None when no patient has more than one visit.
            "avg_revisit_interval": round(avg_revisit_interval, 2) if avg_revisit_interval else 0,
            "peak_hours": peak_hours,
        }
        return JsonResponse({"code": 200, "data": result_data, "message": "患者行为分析完成"})
class ResourceUtilizationAnalysis(View):
    """Daily medical-resource utilization report for one department.

    POST body (JSON): ``{"department_id": ..., "analysis_date": "yyyy-MM-dd"}``.
    Covers bed usage, per-equipment usage, doctor workload, top medicine
    costs, doctor/patient ratio, overloaded doctors (> 8h of treatment),
    under-utilized equipment and the five most expensive medicines.
    """

    def post(self, request):
        params = json.loads(request.body)
        department_id = params.get('department_id')
        analysis_date = params.get('analysis_date')

        def load_table(table_name):
            # All source tables live in the same MySQL database.
            return (spark.read.format("jdbc")
                    .option("url", "jdbc:mysql://localhost:3306/emergency_db")
                    .option("dbtable", table_name)
                    .option("user", "root")
                    .option("password", "password")
                    .load())

        resource_df = load_table("resource_usage")
        patient_df = load_table("patient_records")

        # Restrict both tables to the requested department and calendar day.
        day_resources = resource_df.filter(
            (col("department_id") == department_id)
            & (date_format(col("usage_date"), "yyyy-MM-dd") == analysis_date)
        )
        day_patients = patient_df.filter(
            (col("department_id") == department_id)
            & (date_format(col("admission_time"), "yyyy-MM-dd") == analysis_date)
        )

        bed_usage = (day_resources
                     .filter(col("resource_type") == "bed")
                     .agg(avg("usage_rate").alias("avg_bed_usage"),
                          sum("usage_hours").alias("total_bed_hours"))
                     .collect()[0])
        equipment_usage = (day_resources
                           .filter(col("resource_type") == "equipment")
                           .groupBy("equipment_name")
                           .agg(sum("usage_count").alias("total_usage"),
                                avg("usage_duration").alias("avg_duration"))
                           .orderBy(col("total_usage").desc()))
        doctor_workload = (day_patients
                           .groupBy("attending_doctor")
                           .agg(count("patient_id").alias("patient_count"),
                                sum("treatment_duration").alias("total_work_minutes"),
                                avg("treatment_duration").alias("avg_treatment_time"))
                           .orderBy(col("patient_count").desc()))
        medicine_consumption = (day_resources
                                .filter(col("resource_type") == "medicine")
                                .groupBy("medicine_name")
                                .agg(sum("quantity").alias("total_quantity"),
                                     sum("cost").alias("total_cost"))
                                .orderBy(col("total_cost").desc())
                                .limit(20))

        total_patients = day_patients.count()
        total_doctors = doctor_workload.count()
        if total_doctors > 0:
            doctor_patient_ratio = round(total_patients / total_doctors, 2)
        else:
            doctor_patient_ratio = 0

        # Doctors whose total treatment time exceeds a standard 8h shift.
        high_load_doctors = doctor_workload.filter(col("total_work_minutes") > 480).select(
            "attending_doctor", "patient_count", "total_work_minutes"
        )
        # Utilization score normalized against an 8h (480 min) working day.
        scored_equipment = equipment_usage.withColumn(
            "utilization_score", col("total_usage") * col("avg_duration") / 480
        )
        underutilized_equipment = scored_equipment.filter(col("utilization_score") < 0.5).select(
            "equipment_name", "total_usage", "utilization_score"
        )
        # Rank all medicines by cost and keep the top five.
        cost_window = Window.orderBy(col("total_cost").desc())
        high_cost_medicines = (medicine_consumption
                               .withColumn("cost_rank", dense_rank().over(cost_window))
                               .filter(col("cost_rank") <= 5))

        result_data = {
            "bed_usage": {
                # agg on an empty frame yields nulls, hence the fallbacks.
                "avg_usage_rate": round(bed_usage["avg_bed_usage"], 2) if bed_usage["avg_bed_usage"] else 0,
                "total_hours": round(bed_usage["total_bed_hours"], 2) if bed_usage["total_bed_hours"] else 0,
            },
            "equipment_usage": equipment_usage.toPandas().to_dict(orient='records'),
            "doctor_workload": doctor_workload.toPandas().to_dict(orient='records'),
            "medicine_consumption": medicine_consumption.toPandas().to_dict(orient='records'),
            "doctor_patient_ratio": doctor_patient_ratio,
            "high_load_doctors": high_load_doctors.toPandas().to_dict(orient='records'),
            "underutilized_equipment": underutilized_equipment.toPandas().to_dict(orient='records'),
            "high_cost_medicines": high_cost_medicines.toPandas().to_dict(orient='records'),
        }
        return JsonResponse({"code": 200, "data": result_data, "message": "医疗资源利用分析完成"})
class SatisfactionAnalysis(View):
    """Analyze patient-satisfaction feedback over a date range.

    POST body (JSON): ``{"start_date": ..., "end_date": ...}``.
    Returns overall score, score-level distribution, per-dimension averages,
    satisfaction by waiting-time bucket, by doctor and by disease type,
    complaint breakdown, the ten lowest-scoring records and a weekly trend.
    """

    def post(self, request):
        params = json.loads(request.body)
        start_date = params.get('start_date')
        end_date = params.get('end_date')
        # NOTE(review): JDBC credentials are hard-coded; move them to settings.
        satisfaction_df = (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/emergency_db")
            .option("dbtable", "patient_satisfaction")
            .option("user", "root")
            .option("password", "password")
            .load()
        )
        patient_df = (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/emergency_db")
            .option("dbtable", "patient_records")
            .option("user", "root")
            .option("password", "password")
            .load()
        )
        filtered_satisfaction = satisfaction_df.filter(
            (col("feedback_date") >= start_date) & (col("feedback_date") <= end_date)
        )
        # BUGFIX: joining on an equality predicate kept BOTH copies of
        # patient_id (and of any other column present in both tables, e.g.
        # satisfaction_score, which patient_records also carries), so later
        # unqualified references such as select("patient_id") or
        # avg("satisfaction_score") raise an ambiguous-reference
        # AnalysisException. Project only the patient columns this analysis
        # needs and join on the column name so a single patient_id survives.
        # NOTE(review): assumes patient_records provides wait_minutes — confirm schema.
        patient_cols = patient_df.select(
            "patient_id", "attending_doctor", "disease_type", "wait_minutes"
        )
        joined_df = filtered_satisfaction.join(patient_cols, "patient_id", "left")
        overall_satisfaction = joined_df.agg(
            avg("satisfaction_score").alias("avg_score"),
            count("satisfaction_id").alias("total_feedback"),
        ).collect()[0]
        satisfaction_distribution = (
            joined_df.withColumn(
                "score_level",
                when(col("satisfaction_score") >= 90, "非常满意")
                .when((col("satisfaction_score") >= 70) & (col("satisfaction_score") < 90), "满意")
                .when((col("satisfaction_score") >= 50) & (col("satisfaction_score") < 70), "一般")
                .otherwise("不满意"),
            )
            .groupBy("score_level")
            .agg(count("satisfaction_id").alias("count"))
            .orderBy(col("count").desc())
        )
        dimension_scores = joined_df.groupBy().agg(
            avg("service_attitude_score").alias("avg_service"),
            avg("treatment_effect_score").alias("avg_effect"),
            avg("environment_score").alias("avg_environment"),
            avg("waiting_time_score").alias("avg_waiting"),
        )
        # How satisfaction varies with how long the patient had to wait.
        wait_time_satisfaction = (
            joined_df.withColumn(
                "wait_time_range",
                when(col("wait_minutes") < 15, "0-15分钟")
                .when((col("wait_minutes") >= 15) & (col("wait_minutes") < 30), "15-30分钟")
                .when((col("wait_minutes") >= 30) & (col("wait_minutes") < 60), "30-60分钟")
                .otherwise("60分钟以上"),
            )
            .groupBy("wait_time_range")
            .agg(avg("satisfaction_score").alias("avg_score"),
                 count("satisfaction_id").alias("feedback_count"))
            .orderBy("wait_time_range")
        )
        # Minimum-feedback thresholds avoid ranking doctors/diseases on tiny samples.
        doctor_satisfaction = (
            joined_df.groupBy("attending_doctor")
            .agg(avg("satisfaction_score").alias("avg_doctor_score"),
                 count("satisfaction_id").alias("feedback_count"))
            .filter(col("feedback_count") >= 5)
            .orderBy(col("avg_doctor_score").desc())
        )
        disease_satisfaction = (
            joined_df.groupBy("disease_type")
            .agg(avg("satisfaction_score").alias("avg_score"),
                 count("satisfaction_id").alias("feedback_count"))
            .filter(col("feedback_count") >= 3)
            .orderBy(col("avg_score"))
        )
        complaint_analysis = (
            joined_df.filter(col("has_complaint") == 1)
            .groupBy("complaint_type")
            .agg(count("satisfaction_id").alias("complaint_count"))
            .orderBy(col("complaint_count").desc())
        )
        low_satisfaction_records = (
            joined_df.filter(col("satisfaction_score") < 60)
            .select("patient_id", "satisfaction_score", "feedback_content",
                    "complaint_type", "wait_minutes", "attending_doctor")
            .orderBy("satisfaction_score")
            .limit(10)
        )
        satisfaction_trend_df = (
            joined_df.withColumn("feedback_week", date_format(col("feedback_date"), "yyyy-ww"))
            .groupBy("feedback_week")
            .agg(avg("satisfaction_score").alias("weekly_avg_score"),
                 count("satisfaction_id").alias("weekly_feedback_count"))
            .orderBy("feedback_week")
        )
        result_data = {
            "overall_satisfaction": {
                "avg_score": round(overall_satisfaction["avg_score"], 2) if overall_satisfaction["avg_score"] else 0,
                "total_feedback": overall_satisfaction["total_feedback"],
            },
            "satisfaction_distribution": satisfaction_distribution.toPandas().to_dict(orient='records'),
            # groupBy() with no keys always yields exactly one row, so the
            # first record is safe to take directly.
            "dimension_scores": dimension_scores.toPandas().to_dict(orient='records')[0],
            "wait_time_satisfaction": wait_time_satisfaction.toPandas().to_dict(orient='records'),
            "doctor_satisfaction": doctor_satisfaction.toPandas().to_dict(orient='records'),
            "disease_satisfaction": disease_satisfaction.toPandas().to_dict(orient='records'),
            "complaint_analysis": complaint_analysis.toPandas().to_dict(orient='records'),
            "low_satisfaction_records": low_satisfaction_records.toPandas().to_dict(orient='records'),
            "satisfaction_trend": satisfaction_trend_df.toPandas().to_dict(orient='records'),
        }
        return JsonResponse({"code": 200, "data": result_data, "message": "患者满意度分析完成"})
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨