[Big Data] Guangxi Medical Institutions Data Visualization Analysis System | Computer Science Graduation Project | Hadoop+Spark Environment Setup | Data Science and Big Data Technology | Source Code, Documentation, and Walkthrough Included


一、About the Author

💖💖Author: 计算机编程果茶熊 💙💙About me: I have long worked in computer science training and taught programming. I enjoy teaching and am proficient in Java, WeChat Mini Programs, Python, Golang, Android, and several other IT areas. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know a few techniques for reducing similarity in plagiarism checks. I like sharing solutions to problems I run into during development and talking shop, so feel free to ask me about anything code-related! 💛💛A word of thanks: thank you all for your attention and support! 💜💜 Web projects | Android/Mini Program projects | Big data projects | Graduation project topic selection 💕💕To get the source code, contact 计算机编程果茶熊 (details at the end of this post)

二、System Overview

Big data stack: Hadoop + Spark (Hive supported with custom modification)
Languages: Java + Python (both versions supported)
Database: MySQL
Back end: SpringBoot (Spring + SpringMVC + MyBatis) and Django (both versions supported)
Front end: Vue + ECharts + HTML + CSS + JavaScript + jQuery
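
Before wiring the full stack together, it helps to confirm that Spark can reach MySQL over JDBC. Below is a minimal smoke test; the connector jar path is a placeholder, and the URL and credentials mirror the ones used in the project code further down:

```python
from pyspark.sql import SparkSession

# The MySQL Connector/J jar path below is a placeholder; point it at your install.
spark = (SparkSession.builder
    .appName("EnvSmokeTest")
    .config("spark.jars", "/path/to/mysql-connector-j.jar")
    .getOrCreate())

df = (spark.read.format("jdbc")
    .option("url", "jdbc:mysql://localhost:3306/medical_db")
    .option("dbtable", "medical_institutions")
    .option("user", "root")
    .option("password", "password")
    .load())

print(df.count())  # any row count confirms Spark <-> MySQL connectivity
spark.stop()
```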

三、Video Walkthrough

Guangxi Medical Institutions Data Visualization Analysis System

四、Feature Screenshots

(Screenshots of the system's feature pages appear here in the original post.)

五、Code Highlights


```python
from pyspark.sql import SparkSession
# Only the functions actually used below are imported; importing sum/avg/max/min
# from pyspark.sql.functions would shadow the Python builtins used later on.
from pyspark.sql.functions import col, when, desc
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods

# One shared SparkSession for all analysis views, with adaptive query execution
# and Kryo serialization enabled.
spark = (SparkSession.builder
    .appName("GuangxiMedicalAnalysis")
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .getOrCreate())

JDBC_URL = "jdbc:mysql://localhost:3306/medical_db"

def read_table(table_name):
    """Load one MySQL table into a Spark DataFrame over JDBC."""
    return (spark.read.format("jdbc")
        .option("url", JDBC_URL)
        .option("dbtable", table_name)
        .option("user", "root")
        .option("password", "password")  # TODO: move credentials into Django settings
        .load())

@require_http_methods(["GET"])
def medical_resource_accessibility_analysis(request):
    region_id = request.GET.get('region_id', 'all')
    analysis_type = request.GET.get('type', 'distance')
    # Load the source tables from MySQL via the shared JDBC helper above.
    medical_df = read_table("medical_institutions")
    population_df = read_table("population_distribution")
    if region_id != 'all':
        medical_df = medical_df.filter(col("region_id") == region_id)
        population_df = population_df.filter(col("region_id") == region_id)
    medical_df.createOrReplaceTempView("medical_institutions")
    population_df.createOrReplaceTempView("population_distribution")
    if analysis_type == 'distance':
        # SQRT over raw lat/lng deltas scaled by ~111.12 km/degree is a rough planar
        # approximation of distance; see the haversine sketch after this function.
        accessibility_query = """
        SELECT p.county_name, p.population_count,
               MIN(SQRT(POWER(p.latitude - m.latitude, 2) + POWER(p.longitude - m.longitude, 2)) * 111.12) as min_distance,
               AVG(SQRT(POWER(p.latitude - m.latitude, 2) + POWER(p.longitude - m.longitude, 2)) * 111.12) as avg_distance,
               COUNT(CASE WHEN SQRT(POWER(p.latitude - m.latitude, 2) + POWER(p.longitude - m.longitude, 2)) * 111.12 <= 5 THEN 1 END) as nearby_institutions
        FROM population_distribution p
        CROSS JOIN medical_institutions m
        WHERE m.status = 'active'
        GROUP BY p.county_name, p.population_count
        """
    else:
        # The status filter belongs in the ON clause: putting it in WHERE would
        # turn the LEFT JOIN into an inner join and drop counties with no institutions.
        accessibility_query = """
        SELECT p.county_name, p.population_count,
               COUNT(m.institution_id) as total_institutions,
               SUM(m.bed_count) as total_beds,
               SUM(m.doctor_count) as total_doctors,
               ROUND(SUM(m.bed_count) * 1000.0 / p.population_count, 2) as beds_per_thousand,
               ROUND(SUM(m.doctor_count) * 1000.0 / p.population_count, 2) as doctors_per_thousand
        FROM population_distribution p
        LEFT JOIN medical_institutions m ON p.region_id = m.region_id AND m.status = 'active'
        GROUP BY p.county_name, p.population_count
        """
    result_df = spark.sql(accessibility_query)
    if analysis_type == 'distance':
        # Score each county by the distance to its nearest active institution (km).
        result_df = result_df.withColumn("accessibility_score",
            when(col("min_distance") <= 2, 100)
            .when(col("min_distance") <= 5, 80)
            .when(col("min_distance") <= 10, 60)
            .when(col("min_distance") <= 20, 40)
            .otherwise(20))
        final_result = result_df.orderBy(desc("accessibility_score")).collect()
    else:
        # The resource query has no distance columns, so order by bed density instead
        # (the original code applied the distance scoring unconditionally and crashed here).
        final_result = result_df.orderBy(desc("beds_per_thousand")).collect()
    result_data = []
    for row in final_result:
        d = row.asDict()
        result_data.append({
            'county_name': d['county_name'],
            'population_count': d['population_count'],
            'min_distance': round(d['min_distance'], 2) if d.get('min_distance') is not None else None,
            'avg_distance': round(d['avg_distance'], 2) if d.get('avg_distance') is not None else None,
            'nearby_institutions': d.get('nearby_institutions'),
            'accessibility_score': d.get('accessibility_score'),
            'beds_per_thousand': d.get('beds_per_thousand'),
            'doctors_per_thousand': d.get('doctors_per_thousand')
        })
    return JsonResponse({'status': 'success', 'data': result_data, 'total': len(result_data)})
```
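
The `* 111.12` factor in the distance query treats one degree of latitude or longitude as roughly 111 km, which overstates east-west distances away from the equator. A minimal haversine sketch that could stand in for that SQL expression (column names assumed to match the tables above):

```python
import math

def haversine_km(lat1, lng1, lat2, lng2):
    """Great-circle distance in kilometres between two (lat, lng) points."""
    r = 6371.0  # mean Earth radius, km
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lng2 - lng1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

# Registered as a UDF, it can replace the SQRT/POWER expression in the SQL above:
# from pyspark.sql.types import FloatType
# spark.udf.register("haversine_km", haversine_km, FloatType())
# ...then: MIN(haversine_km(p.latitude, p.longitude, m.latitude, m.longitude))
```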

```python
@require_http_methods(["GET"])
def institution_geographic_distribution_analysis(request):
    level_filter = request.GET.get('level', 'all')
    city_filter = request.GET.get('city', 'all')
    institutions_df = read_table("medical_institutions")
    if level_filter != 'all':
        institutions_df = institutions_df.filter(col("institution_level") == level_filter)
    if city_filter != 'all':
        institutions_df = institutions_df.filter(col("city_code") == city_filter)
    institutions_df.createOrReplaceTempView("institutions")
    # Per-city totals with a breakdown by accreditation level (三级甲等 = Grade 3A, etc.).
    city_distribution_query = """
    SELECT city_name, city_code,
           COUNT(*) as institution_count,
           COUNT(CASE WHEN institution_level = '三级甲等' THEN 1 END) as level_3a_count,
           COUNT(CASE WHEN institution_level = '三级乙等' THEN 1 END) as level_3b_count,
           COUNT(CASE WHEN institution_level = '二级甲等' THEN 1 END) as level_2a_count,
           COUNT(CASE WHEN institution_level = '二级乙等' THEN 1 END) as level_2b_count,
           COUNT(CASE WHEN institution_level = '一级' THEN 1 END) as level_1_count,
           SUM(bed_count) as total_beds,
           SUM(doctor_count) as total_doctors,
           AVG(bed_count) as avg_beds_per_institution,
           AVG(doctor_count) as avg_doctors_per_institution
    FROM institutions
    WHERE status = 'active'
    GROUP BY city_name, city_code
    ORDER BY institution_count DESC
    """
    city_results = spark.sql(city_distribution_query)
    # Per-county breakdown by institution type; AVG(lat/lng) gives a crude centroid
    # for map plotting. (The original query selected COUNT(*) twice under two aliases.)
    county_distribution_query = """
    SELECT county_name, city_name, institution_type,
           COUNT(*) as type_count,
           AVG(latitude) as center_lat,
           AVG(longitude) as center_lng,
           SUM(bed_count) as total_beds
    FROM institutions
    WHERE status = 'active'
    GROUP BY county_name, city_name, institution_type
    ORDER BY city_name, county_name, type_count DESC
    """
    county_results = spark.sql(county_distribution_query)
    # area_km2 repeats on every institution row within a city, so MAX(area_km2)
    # simply recovers the city's land area for the density ratio.
    density_analysis_query = """
    SELECT city_name,
           COUNT(*) as institution_count,
           ROUND(COUNT(*) / (MAX(area_km2)), 2) as institution_density,
           CASE 
               WHEN COUNT(*) / MAX(area_km2) >= 0.5 THEN '高密度'
               WHEN COUNT(*) / MAX(area_km2) >= 0.2 THEN '中密度'
               ELSE '低密度'
           END as density_level
    FROM institutions
    WHERE status = 'active' AND area_km2 > 0
    GROUP BY city_name
    ORDER BY institution_density DESC
    """
    density_results = spark.sql(density_analysis_query)
    city_data = [row.asDict() for row in city_results.collect()]
    county_data = [row.asDict() for row in county_results.collect()]
    density_data = [row.asDict() for row in density_results.collect()]
    # Nest each city's county breakdown and density metrics into one record,
    # ready for a drill-down map on the front end.
    geographic_clusters = []
    for city_row in city_data:
        city_counties = [county for county in county_data if county['city_name'] == city_row['city_name']]
        cluster_info = {
            'city_name': city_row['city_name'],
            'total_institutions': city_row['institution_count'],
            'counties': city_counties,
            'density_info': next((d for d in density_data if d['city_name'] == city_row['city_name']), {})
        }
        geographic_clusters.append(cluster_info)
    return JsonResponse({'status': 'success', 'city_distribution': city_data, 'county_distribution': county_data, 'density_analysis': density_data, 'geographic_clusters': geographic_clusters})
```
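
A quick way to smoke-test these views without the Vue front end is Django's `RequestFactory`; a sketch, assuming the project's Django settings are configured:

```python
from django.test import RequestFactory

factory = RequestFactory()
# Call the distribution view exactly as the front end would.
request = factory.get('/analysis/distribution', {'level': '三级甲等', 'city': 'all'})
response = institution_geographic_distribution_analysis(request)
print(response.status_code)    # 200 on success
print(response.content[:300])  # JSON payload the ECharts map consumes
```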

```python
@require_http_methods(["GET"])
def medical_insurance_coverage_analysis(request):
    start_date = request.GET.get('start_date', '2023-01-01')
    end_date = request.GET.get('end_date', '2023-12-31')
    insurance_type = request.GET.get('insurance_type', 'all')
    # population_distribution was loaded here in the original code but never queried,
    # so only the two tables that are actually used are read.
    institutions_df = read_table("medical_institutions")
    insurance_df = read_table("insurance_records")
    # Apply the date window once here; every temp-view query below inherits it.
    insurance_df = insurance_df.filter((col("record_date") >= start_date) & (col("record_date") <= end_date))
    if insurance_type != 'all':
        insurance_df = insurance_df.filter(col("insurance_type") == insurance_type)
    institutions_df.createOrReplaceTempView("institutions")
    insurance_df.createOrReplaceTempView("insurance_records")
    # County-level coverage: the share of active institutions that accept insurance,
    # and the share of billed amounts actually reimbursed.
    coverage_analysis_query = """
    SELECT i.city_name, i.county_name,
           COUNT(DISTINCT inst.institution_id) as total_institutions,
           COUNT(DISTINCT CASE WHEN inst.accepts_medical_insurance = 1 THEN inst.institution_id END) as insured_institutions,
           ROUND(COUNT(DISTINCT CASE WHEN inst.accepts_medical_insurance = 1 THEN inst.institution_id END) * 100.0 / COUNT(DISTINCT inst.institution_id), 2) as institution_coverage_rate,
           SUM(i.insured_amount) as total_insured_amount,
           SUM(i.total_amount) as total_medical_amount,
           ROUND(SUM(i.insured_amount) * 100.0 / SUM(i.total_amount), 2) as amount_coverage_rate,
           COUNT(i.patient_id) as total_patients,
           COUNT(DISTINCT i.patient_id) as unique_patients,
           AVG(i.insured_amount) as avg_insured_per_visit,
           SUM(CASE WHEN i.insurance_type = '城镇职工' THEN i.insured_amount ELSE 0 END) as urban_employee_amount,
           SUM(CASE WHEN i.insurance_type = '城乡居民' THEN i.insured_amount ELSE 0 END) as resident_amount,
           SUM(CASE WHEN i.insurance_type = '新农合' THEN i.insured_amount ELSE 0 END) as rural_coop_amount
    FROM insurance_records i
    JOIN institutions inst ON i.institution_id = inst.institution_id
    WHERE inst.status = 'active'
    GROUP BY i.city_name, i.county_name
    ORDER BY institution_coverage_rate DESC, amount_coverage_rate DESC
    """
    coverage_results = spark.sql(coverage_analysis_query)
    # Register the county-level result as a view and roll it up per city; this is
    # cleaner and safer than splicing the first query into a subquery string.
    coverage_results.createOrReplaceTempView("county_coverage")
    regional_coverage_query = """
    SELECT city_name,
           SUM(total_institutions) as city_total_institutions,
           SUM(insured_institutions) as city_insured_institutions,
           ROUND(SUM(insured_institutions) * 100.0 / SUM(total_institutions), 2) as city_coverage_rate,
           SUM(total_insured_amount) as city_insured_amount,
           SUM(total_medical_amount) as city_medical_amount,
           ROUND(SUM(total_insured_amount) * 100.0 / SUM(total_medical_amount), 2) as city_amount_coverage
    FROM county_coverage
    GROUP BY city_name
    ORDER BY city_coverage_rate DESC
    """
    regional_results = spark.sql(regional_coverage_query)
    # insurance_records was already filtered to [start_date, end_date] above, so no
    # further date predicate is needed (this also avoids string-formatted SQL built
    # directly from request parameters).
    insurance_type_analysis_query = """
    SELECT insurance_type,
           COUNT(*) as record_count,
           SUM(insured_amount) as type_insured_amount,
           SUM(total_amount) as type_total_amount,
           ROUND(SUM(insured_amount) * 100.0 / SUM(total_amount), 2) as type_coverage_rate,
           COUNT(DISTINCT patient_id) as covered_patients,
           AVG(insured_amount) as avg_coverage_per_visit
    FROM insurance_records
    GROUP BY insurance_type
    ORDER BY type_coverage_rate DESC
    """
    type_results = spark.sql(insurance_type_analysis_query)
    coverage_data = [row.asDict() for row in coverage_results.collect()]
    regional_data = [row.asDict() for row in regional_results.collect()]
    type_data = [row.asDict() for row in type_results.collect()]
    # Roll the city-level rows up into province-wide totals, guarding against
    # division by zero when the filters match no records.
    total_inst = sum(row['city_total_institutions'] for row in regional_data)
    insured_inst = sum(row['city_insured_institutions'] for row in regional_data)
    insured_amt = sum(row['city_insured_amount'] for row in regional_data)
    medical_amt = sum(row['city_medical_amount'] for row in regional_data)
    overall_coverage = {
        'total_institutions': total_inst,
        'insured_institutions': insured_inst,
        'overall_coverage_rate': round(insured_inst * 100.0 / total_inst, 2) if total_inst else 0,
        'total_insured_amount': insured_amt,
        'total_medical_amount': medical_amt,
        'overall_amount_coverage': round(insured_amt * 100.0 / medical_amt, 2) if medical_amt else 0
    }
    return JsonResponse({'status': 'success', 'county_coverage': coverage_data, 'city_coverage': regional_data, 'insurance_type_analysis': type_data, 'overall_statistics': overall_coverage})
```
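
`start_date` and `end_date` flow straight from the query string into the Spark filter, so it is worth validating them first. A minimal sketch (the helper name `parse_date_range` is my own, not part of the original project):

```python
from datetime import datetime
from django.http import JsonResponse

def parse_date_range(request, default_start='2023-01-01', default_end='2023-12-31'):
    """Validate GET date parameters; returns (start, end, error_response)."""
    start = request.GET.get('start_date', default_start)
    end = request.GET.get('end_date', default_end)
    try:
        s = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
    except ValueError:
        return None, None, JsonResponse(
            {'status': 'error', 'message': 'dates must be YYYY-MM-DD'}, status=400)
    if s > e:
        return None, None, JsonResponse(
            {'status': 'error', 'message': 'start_date is after end_date'}, status=400)
    return start, end, None
```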

六、Documentation Preview

(A preview of the accompanying thesis document appears here in the original post.)

七、END

💕💕To get the source code, contact 计算机编程果茶熊.