💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
[TOC]
基于大数据的广西医疗机构数据可视化分析系统介绍
《基于大数据的广西医疗机构数据可视化分析系统》是一套采用先进大数据技术栈构建的综合性医疗数据分析平台,系统以Hadoop分布式存储框架和Spark大数据计算引擎为核心,结合HDFS分布式文件系统实现海量医疗机构数据的高效存储与处理,通过Spark SQL进行复杂数据查询分析,并利用Pandas和NumPy进行深度数据挖掘和统计计算。系统采用前后端分离架构设计,后端基于Django框架和Spring Boot微服务架构提供稳定的API服务,前端运用Vue.js响应式框架结合ElementUI组件库构建现代化用户界面,通过Echarts数据可视化库实现丰富的图表展示效果。系统核心功能涵盖广西医疗机构数据的全方位管理与分析,包括医疗资源可达性分析、机构地理分布分析、医保服务覆盖率分析、机构规模结构分析以及机构服务能力分析等五大核心分析模块,能够从多维度深入挖掘广西地区医疗资源配置现状与服务能力水平。系统特别设计了数据大屏可视化展示功能,通过直观的图表、地图和实时数据展示,为医疗管理部门提供科学决策支持,同时配备完善的用户管理、权限控制和系统管理功能,确保数据安全性和系统稳定性,为广西医疗卫生事业的数字化转型和智能化管理提供有力的技术支撑。
基于大数据的广西医疗机构数据可视化分析系统演示视频
基于大数据的广西医疗机构数据可视化分析系统演示图片
基于大数据的广西医疗机构数据可视化分析系统代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum, avg, max, min, when, isnan, isnull
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
# Shared SparkSession used by every analysis view in this module.
# Adaptive query execution is enabled so Spark can coalesce shuffle
# partitions at runtime instead of using a fixed partition count.
spark = (
    SparkSession.builder
    .appName("GuangxiMedicalAnalysis")
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
    .getOrCreate()
)
@csrf_exempt
def medical_institution_data_management(request):
    """Ingest one medical-institution record, persist it to HDFS, and return
    basic data-quality and insurance-coverage statistics for the record.

    Expects a POST body with JSON keys: institution_name, institution_type,
    level, address, longitude, latitude, bed_count, doctor_count,
    medical_insurance.

    Returns a JsonResponse; non-POST requests and malformed bodies get an
    explicit error response instead of falling through (the original code
    returned None for non-POST requests, which makes Django raise
    "The view ... didn't return an HttpResponse object").
    """
    if request.method != 'POST':
        return JsonResponse({"status": "error", "message": "仅支持POST请求"}, status=405)
    try:
        data = json.loads(request.body)
    except (json.JSONDecodeError, TypeError):
        return JsonResponse({"status": "error", "message": "请求体不是合法JSON"}, status=400)
    required_fields = ['institution_name', 'institution_type', 'level', 'address',
                       'longitude', 'latitude', 'bed_count', 'doctor_count',
                       'medical_insurance']
    missing = [f for f in required_fields if f not in data]
    if missing:
        # A missing key previously surfaced as an unhandled KeyError (HTTP 500).
        return JsonResponse({"status": "error", "message": "缺少字段: " + ", ".join(missing)}, status=400)
    df = spark.createDataFrame(
        [(data['institution_name'], data['institution_type'], data['level'],
          data['address'], data['longitude'], data['latitude'],
          data['bed_count'], data['doctor_count'], data['medical_insurance'])],
        ['name', 'type', 'level', 'address', 'longitude', 'latitude',
         'bed_count', 'doctor_count', 'medical_insurance'])
    # Persist the raw record to HDFS before any derived columns are added.
    df.write.mode('append').option("header", "true").csv("/hadoop/medical_data/institutions")
    # Composite resource index: each doctor is weighted twice as heavily as a bed.
    processed_df = df.withColumn("resource_index", col("bed_count") + col("doctor_count") * 2)
    processed_df = processed_df.withColumn(
        "service_level",
        when(col("resource_index") > 500, "高")
        .when(col("resource_index") > 200, "中")
        .otherwise("低"))
    # Rows without coordinates are useless for the geographic analyses downstream.
    processed_df = processed_df.filter(col("longitude").isNotNull() & col("latitude").isNotNull())
    # Per-column count of NaN/null values as a simple data-quality report.
    # NOTE(review): isnan() is only defined for float/double columns in Spark;
    # applying it to the string columns here may raise AnalysisException —
    # confirm against the actual column types.
    quality_check = processed_df.select(
        [count(when(isnan(c) | isnull(c), c)).alias(c) for c in processed_df.columns])
    quality_result = quality_check.collect()[0].asDict()
    total_records = processed_df.count()
    valid_records = processed_df.filter(col("bed_count") > 0).count()
    coverage_stats = processed_df.groupBy("medical_insurance").agg(
        count("*").alias("count"), avg("bed_count").alias("avg_beds"))
    pandas_stats = coverage_stats.toPandas()
    result_data = {"total_records": total_records,
                   "valid_records": valid_records,
                   "data_quality": quality_result,
                   "coverage_analysis": pandas_stats.to_dict('records')}
    processed_df.createOrReplaceTempView("institutions_temp")
    # NOTE(review): assumes medical_analysis.institutions_processed already
    # exists with a compatible schema — confirm before deployment.
    spark.sql("INSERT INTO medical_analysis.institutions_processed SELECT * FROM institutions_temp")
    return JsonResponse({"status": "success", "message": "医疗机构数据处理完成", "analysis_result": result_data})
@csrf_exempt
def medical_resource_accessibility_analysis(request):
    """Analyse how accessible medical institutions are for each population region.

    Reads institution and population-density CSVs from HDFS, joins every
    region to all institutions within roughly 50 km, then returns per-region
    accessibility metrics, the list of under-served regions
    (accessibility_score < 60), and summary statistics — all as JSON.
    """
    institutions_df = spark.read.option("header", "true").csv("/hadoop/medical_data/institutions")
    population_df = spark.read.option("header", "true").csv("/hadoop/medical_data/population_density")
    # CSV columns load as strings; only the coordinates are cast explicitly.
    # NOTE(review): bed_count and population_count remain strings and rely on
    # Spark's implicit casts inside the SQL aggregates below — confirm the
    # source data is clean enough for that.
    institutions_df = institutions_df.withColumn("longitude", col("longitude").cast("double")).withColumn("latitude", col("latitude").cast("double"))
    population_df = population_df.withColumn("pop_longitude", col("pop_longitude").cast("double")).withColumn("pop_latitude", col("pop_latitude").cast("double"))
    institutions_df.createOrReplaceTempView("institutions")
    population_df.createOrReplaceTempView("population")
    # Distance model: Euclidean distance in degrees scaled by 111.32 km per
    # degree — a flat-earth approximation that ignores the cos(latitude)
    # shrinkage of longitude degrees, so east-west distances are somewhat
    # overestimated at Guangxi's latitude. Adequate for coarse ranking only.
    # The LEFT JOIN keeps regions with no institution within 50 km; for those
    # the AVG/MIN distances are NULL and nearby_institutions is 0.
    accessibility_query = """
SELECT p.region_code, p.population_count, p.pop_longitude, p.pop_latitude,
COUNT(i.name) as nearby_institutions,
AVG(SQRT(POW(i.longitude - p.pop_longitude, 2) + POW(i.latitude - p.pop_latitude, 2)) * 111.32) as avg_distance_km,
MIN(SQRT(POW(i.longitude - p.pop_longitude, 2) + POW(i.latitude - p.pop_latitude, 2)) * 111.32) as min_distance_km,
SUM(CASE WHEN i.level = '三甲' THEN i.bed_count ELSE 0 END) as high_level_beds,
SUM(CASE WHEN SQRT(POW(i.longitude - p.pop_longitude, 2) + POW(i.latitude - p.pop_latitude, 2)) * 111.32 <= 10 THEN 1 ELSE 0 END) as institutions_within_10km
FROM population p
LEFT JOIN institutions i ON SQRT(POW(i.longitude - p.pop_longitude, 2) + POW(i.latitude - p.pop_latitude, 2)) * 111.32 <= 50
GROUP BY p.region_code, p.population_count, p.pop_longitude, p.pop_latitude
"""
    accessibility_result = spark.sql(accessibility_query)
    # Bucket average distance into a 0-100 score. Regions with NULL
    # avg_distance_km (no institution within 50 km) fail every when()
    # condition and fall through to the lowest score, 20.
    accessibility_result = accessibility_result.withColumn("accessibility_score",
        when(col("avg_distance_km") <= 5, 100)
        .when(col("avg_distance_km") <= 10, 80)
        .when(col("avg_distance_km") <= 20, 60)
        .when(col("avg_distance_km") <= 30, 40)
        .otherwise(20))
    # Tertiary-hospital ("三甲") beds per 1,000 residents of the region.
    accessibility_result = accessibility_result.withColumn("service_adequacy", col("high_level_beds") / col("population_count") * 1000)
    low_accessibility_regions = accessibility_result.filter(col("accessibility_score") < 60).orderBy(col("accessibility_score"))
    pandas_result = accessibility_result.toPandas()
    low_access_pandas = low_accessibility_regions.toPandas()
    summary_stats = accessibility_result.agg(avg("accessibility_score").alias("avg_score"), min("accessibility_score").alias("min_score"), max("accessibility_score").alias("max_score"), count("*").alias("total_regions")).collect()[0]
    return JsonResponse({"accessibility_analysis": pandas_result.to_dict('records'), "low_accessibility_regions": low_access_pandas.to_dict('records'), "summary_statistics": summary_stats.asDict()})
@csrf_exempt
def data_dashboard_visualization(request):
    """Aggregate institution data into the JSON payload for the dashboard
    ("data big screen"): regional/type/level distributions, insurance
    coverage rate, capacity tiers, and a 0.1-degree grid density of
    institution locations.
    """
    institutions_df = spark.read.option("header", "true").csv("/hadoop/medical_data/institutions")
    # CSV columns arrive as strings; cast every numeric column once up front.
    # (The original left longitude/latitude as strings and relied on Spark's
    # implicit casts in the grid arithmetic below.)
    institutions_df = (institutions_df
                       .withColumn("bed_count", col("bed_count").cast("int"))
                       .withColumn("doctor_count", col("doctor_count").cast("int"))
                       .withColumn("longitude", col("longitude").cast("double"))
                       .withColumn("latitude", col("latitude").cast("double")))
    regional_distribution = institutions_df.groupBy("address").agg(count("*").alias("institution_count"), sum("bed_count").alias("total_beds"), sum("doctor_count").alias("total_doctors"), avg("bed_count").alias("avg_beds_per_institution"))
    type_analysis = institutions_df.groupBy("type").agg(count("*").alias("count"), avg("bed_count").alias("avg_beds"), avg("doctor_count").alias("avg_doctors"))
    level_analysis = institutions_df.groupBy("level").agg(count("*").alias("count"), sum("bed_count").alias("total_beds"))
    insurance_coverage = institutions_df.groupBy("medical_insurance").agg(count("*").alias("institution_count"), sum("bed_count").alias("covered_beds"))
    # Share of all beds that sit in insurance-covered ("是") institutions.
    raw_rate = insurance_coverage.agg(sum(when(col("medical_insurance") == "是", col("covered_beds")).otherwise(0)) / sum("covered_beds") * 100).collect()[0][0]
    # Guard against None (empty dataset or zero total beds → NULL aggregate):
    # the original round(None, 2) raised TypeError.
    total_coverage_rate = round(raw_rate, 2) if raw_rate is not None else 0.0
    # Tier institutions by bed count for the capacity breakdown.
    service_capacity_analysis = institutions_df.withColumn("capacity_level",
        when(col("bed_count") > 800, "大型")
        .when(col("bed_count") > 300, "中型")
        .when(col("bed_count") > 100, "小型")
        .otherwise("微型"))
    capacity_distribution = service_capacity_analysis.groupBy("capacity_level").agg(count("*").alias("count"), avg("doctor_count").alias("avg_doctors"))
    geographical_hotspots = institutions_df.select("longitude", "latitude", "bed_count", "type").filter(col("longitude").isNotNull())
    # Snap coordinates to a 0.1-degree grid (×10, truncate to int, ÷10) and
    # count institutions per cell to find geographic hotspots.
    hotspot_analysis = geographical_hotspots.withColumn("lng_grid", (col("longitude") * 10).cast("int") / 10).withColumn("lat_grid", (col("latitude") * 10).cast("int") / 10)
    grid_density = hotspot_analysis.groupBy("lng_grid", "lat_grid").agg(count("*").alias("density"), sum("bed_count").alias("total_capacity"))
    top_density_areas = grid_density.orderBy(col("density").desc()).limit(10)
    regional_pandas = regional_distribution.toPandas()
    type_pandas = type_analysis.toPandas()
    level_pandas = level_analysis.toPandas()
    insurance_pandas = insurance_coverage.toPandas()
    capacity_pandas = capacity_distribution.toPandas()
    hotspot_pandas = top_density_areas.toPandas()
    dashboard_data = {"regional_stats": regional_pandas.to_dict('records'), "type_distribution": type_pandas.to_dict('records'), "level_distribution": level_pandas.to_dict('records'), "insurance_coverage": insurance_pandas.to_dict('records'), "total_coverage_rate": total_coverage_rate, "capacity_analysis": capacity_pandas.to_dict('records'), "geographical_hotspots": hotspot_pandas.to_dict('records')}
    return JsonResponse({"dashboard_data": dashboard_data, "status": "success"})
基于大数据的广西医疗机构数据可视化分析系统文档展示
💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目