💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
@TOC
北京市医保药品数据分析系统介绍
《基于大数据的北京市医保药品数据分析系统》是一套专门针对医保药品数据进行深度挖掘与智能分析的综合性大数据应用系统。该系统采用目前主流的大数据技术架构,以Hadoop分布式存储框架为基础,利用HDFS实现海量医保药品数据的可靠存储与管理,通过Spark大数据计算引擎配合Spark SQL进行高效的数据处理与实时分析。系统后端采用Python语言结合Django框架构建,充分发挥Python在数据科学领域的优势,集成Pandas和NumPy等专业数据分析库,实现对北京市医保药品数据的多维度统计分析。前端界面基于Vue.js框架开发,结合ElementUI组件库提供友好的用户交互体验,通过Echarts可视化图表库将复杂的数据分析结果以直观的图表形式展现。系统功能涵盖完整的数据分析流程,包括系统首页总览、个人中心管理、用户权限管理等基础模块,核心分析功能包括大屏数据可视化展示、医保药品信息的全面管理、药品核心属性的深度分析、基于机器学习算法的药品数据挖掘分析、药品生产厂家的统计分析、医保报销策略的智能分析以及中药及颗粒的专项分析等模块。整个系统充分体现了大数据技术在医疗健康领域的实际应用价值,为医保管理部门提供科学的决策支持,同时也为计算机专业学生提供了一个集大数据存储、处理、分析、可视化于一体的完整项目实践平台。
北京市医保药品数据分析系统演示视频
北京市医保药品数据分析系统演示图片
北京市医保药品数据分析系统代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum, avg, max, min, desc, asc, when, isnan, isnull
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
# Module-level SparkSession shared by all analysis views below.
# Adaptive Query Execution is enabled so Spark can re-plan joins and
# coalesce shuffle partitions at runtime based on observed data sizes.
spark = SparkSession.builder.appName("MedicalInsuranceDrugAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
@csrf_exempt
def drug_core_attribute_analysis(request):
    """Multi-dimensional analysis of core drug attributes.

    POST body (JSON):
        drug_category: optional category filter; empty string means no filter.
        price_range:   [low, high] price window, defaults to [0, 10000].

    Returns:
        JsonResponse with price statistics, efficacy / manufacturer /
        reimbursement / dosage-form / prescription-type aggregations and a
        low/mid/high price-level distribution.
    """
    if request.method != 'POST':
        # Bug fix: the original fell through and returned None for non-POST
        # requests, which makes Django raise "didn't return an HttpResponse".
        return JsonResponse({"code": 405, "message": "Method Not Allowed"}, status=405)
    data = json.loads(request.body)
    drug_category = data.get('drug_category', '')
    price_range = data.get('price_range', [0, 10000])
    # NOTE(review): JDBC credentials are hardcoded — move to Django settings/env.
    df = (spark.read.format("jdbc")
          .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
          .option("dbtable", "drug_info")
          .option("user", "root")
          .option("password", "password")
          .load())
    if drug_category:
        df = df.filter(col("drug_category") == drug_category)
    # Drop NULL prices/names first so every later aggregation sees clean rows.
    df = df.filter(col("drug_price").isNotNull() & col("drug_name").isNotNull())
    df = df.filter((col("drug_price") >= price_range[0]) & (col("drug_price") <= price_range[1]))
    price_stats = df.agg(
        avg("drug_price").alias("avg_price"),
        max("drug_price").alias("max_price"),
        min("drug_price").alias("min_price"),
        count("drug_id").alias("total_count"),
    ).collect()[0]
    efficacy_distribution = df.groupBy("drug_efficacy").agg(
        count("drug_id").alias("count"),
        avg("drug_price").alias("avg_efficacy_price"),
    ).orderBy(desc("count"))
    manufacturer_stats = df.groupBy("manufacturer_name").agg(
        count("drug_id").alias("drug_count"),
        avg("drug_price").alias("avg_manufacturer_price"),
    ).orderBy(desc("drug_count"))
    reimbursement_analysis = df.groupBy("reimbursement_ratio").agg(
        count("drug_id").alias("ratio_count"),
        avg("drug_price").alias("avg_ratio_price"),
    ).orderBy(desc("reimbursement_ratio"))
    price_distribution = df.select(
        when(col("drug_price") <= 50, "低价药品")
        .when((col("drug_price") > 50) & (col("drug_price") <= 200), "中价药品")
        .otherwise("高价药品")
        .alias("price_level")
    ).groupBy("price_level").agg(count("*").alias("count"))
    dosage_form_analysis = df.groupBy("dosage_form").agg(
        count("drug_id").alias("form_count"),
        avg("drug_price").alias("avg_form_price"),
        avg("reimbursement_ratio").alias("avg_reimbursement"),
    ).orderBy(desc("form_count"))
    prescription_type_stats = df.groupBy("prescription_type").agg(
        count("drug_id").alias("type_count"),
        avg("drug_price").alias("avg_type_price"),
    ).orderBy(desc("type_count"))

    def _num(value):
        # Bug fix: avg/max/min come back as NULL when the filters match zero
        # rows; the original float(None) raised a TypeError.
        return float(value) if value is not None else 0.0

    # Perf: limit(N) pushes the cutoff into Spark instead of collecting the
    # full ordered result and slicing on the driver (original collect()[:N]).
    result_data = {
        "price_statistics": {
            "average_price": _num(price_stats["avg_price"]),
            "max_price": _num(price_stats["max_price"]),
            "min_price": _num(price_stats["min_price"]),
            "total_drugs": int(price_stats["total_count"]),
        },
        "efficacy_distribution": [
            {"efficacy": row["drug_efficacy"], "count": row["count"],
             "avg_price": _num(row["avg_efficacy_price"])}
            for row in efficacy_distribution.limit(10).collect()
        ],
        "manufacturer_ranking": [
            {"manufacturer": row["manufacturer_name"], "drug_count": row["drug_count"],
             "avg_price": _num(row["avg_manufacturer_price"])}
            for row in manufacturer_stats.limit(15).collect()
        ],
        "reimbursement_analysis": [
            {"ratio": _num(row["reimbursement_ratio"]), "count": row["ratio_count"],
             "avg_price": _num(row["avg_ratio_price"])}
            for row in reimbursement_analysis.collect()
        ],
        "price_distribution": [
            {"level": row["price_level"], "count": row["count"]}
            for row in price_distribution.collect()
        ],
        "dosage_form_stats": [
            {"form": row["dosage_form"], "count": row["form_count"],
             "avg_price": _num(row["avg_form_price"]),
             "avg_reimbursement": _num(row["avg_reimbursement"])}
            for row in dosage_form_analysis.collect()
        ],
        "prescription_type_stats": [
            {"type": row["prescription_type"], "count": row["type_count"],
             "avg_price": _num(row["avg_type_price"])}
            for row in prescription_type_stats.collect()
        ],
    }
    return JsonResponse({"code": 200, "data": result_data, "message": "药品核心属性分析完成"})
@csrf_exempt
def drug_data_mining_analysis(request):
    """Data-mining analysis over joined drug usage records and drug info.

    POST body (JSON):
        analysis_type: 'correlation' | 'trend' | 'anomaly' (default 'correlation').
        time_period:   integer, defaults to 30.
            NOTE(review): read but never applied as a filter — confirm intent.

    Returns:
        JsonResponse with correlation / trend / anomaly results (depending on
        analysis_type) plus frequent-pattern, seasonal and clustering summaries
        that are computed for every request.
    """
    if request.method != 'POST':
        # Bug fix: the original returned None for non-POST requests.
        return JsonResponse({"code": 405, "message": "Method Not Allowed"}, status=405)
    data = json.loads(request.body)
    analysis_type = data.get('analysis_type', 'correlation')
    time_period = data.get('time_period', 30)
    # NOTE(review): JDBC credentials are hardcoded — move to Django settings/env.
    records_df = (spark.read.format("jdbc")
                  .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
                  .option("dbtable", "drug_usage_records")
                  .option("user", "root")
                  .option("password", "password")
                  .load())
    info_df = (spark.read.format("jdbc")
               .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
               .option("dbtable", "drug_info")
               .option("user", "root")
               .option("password", "password")
               .load())
    # Bug fix: joining on the expression records_df.drug_id == info_df.drug_id
    # keeps BOTH drug_id columns, so every later count("drug_id") is ambiguous
    # and Spark raises an AnalysisException. Joining on the column name
    # deduplicates the key.
    joined_df = records_df.join(info_df, "drug_id", "inner")
    joined_df = joined_df.filter(col("usage_date").isNotNull() & col("usage_count").isNotNull())

    # Initialize branch outputs up front instead of the original's fragile
    # 'x in locals()' checks when building the response.
    correlation_value = 0.0
    high_corr_rows = []
    trend_rows = []
    anomaly_rows = []
    if analysis_type == 'correlation':
        price_usage = joined_df.select("drug_price", "usage_count").toPandas()
        # Guard: corrcoef needs at least two samples; otherwise keep 0.0.
        if len(price_usage) > 1:
            correlation_value = float(np.corrcoef(price_usage["drug_price"], price_usage["usage_count"])[0, 1])
        high_corr_rows = (joined_df
                          .filter((col("drug_price") > 100) & (col("usage_count") > 50))
                          .select("drug_name", "drug_price", "usage_count", "reimbursement_ratio")
                          .limit(10).collect())
    elif analysis_type == 'trend':
        # Bug fix: the original computed trend aggregations and then silently
        # discarded them; expose the per-category summary (additive key in the
        # response, so existing clients are unaffected).
        trend_rows = joined_df.groupBy("drug_category").agg(
            avg("usage_count").alias("avg_monthly_usage"),
            sum("usage_count").alias("total_usage"),
        ).collect()
    elif analysis_type == 'anomaly':
        avg_usage = joined_df.agg(avg("usage_count")).collect()[0][0]
        # Bug fix: avg is NULL on an empty dataset; None * 3 raised TypeError.
        if avg_usage is not None:
            anomaly_threshold = avg_usage * 3
            anomaly_rows = (joined_df
                            .filter(col("usage_count") > anomaly_threshold)
                            .select("drug_name", "usage_count", "usage_date", "drug_price")
                            .limit(15).collect())

    frequent_patterns = joined_df.groupBy("drug_category", "manufacturer_name").agg(
        count("drug_id").alias("pattern_count"),
        avg("usage_count").alias("avg_pattern_usage"),
    ).filter(col("pattern_count") > 10).orderBy(desc("pattern_count"))
    # Season is derived from the month substring (positions 6-7) of the
    # 'YYYY-MM-DD'-style usage_date string.
    seasonal_analysis = joined_df.select(
        "*",
        when(col("usage_date").substr(6, 2).isin(["03", "04", "05"]), "春季")
        .when(col("usage_date").substr(6, 2).isin(["06", "07", "08"]), "夏季")
        .when(col("usage_date").substr(6, 2).isin(["09", "10", "11"]), "秋季")
        .otherwise("冬季").alias("season"),
    ).groupBy("season", "drug_category").agg(
        sum("usage_count").alias("seasonal_usage"),
        avg("drug_price").alias("seasonal_avg_price"),
    )
    # The original also built usage_prediction_data / category_usage_prediction
    # via toPandas() + pd.cut and never used them — dead driver-side work, removed.
    clustering_features = joined_df.select("drug_price", "usage_count", "reimbursement_ratio").toPandas()
    clustering_features["price_usage_ratio"] = clustering_features["drug_price"] / (clustering_features["usage_count"] + 1)
    clustering_features["reimbursement_usage_score"] = clustering_features["reimbursement_ratio"] * clustering_features["usage_count"]
    sample_count = len(clustering_features)
    result_data = {
        "correlation_analysis": {
            "price_usage_correlation": correlation_value,
            "high_correlation_drugs": [
                {"drug_name": row["drug_name"], "price": float(row["drug_price"]),
                 "usage": row["usage_count"], "reimbursement": float(row["reimbursement_ratio"])}
                for row in high_corr_rows
            ],
        },
        "trend_analysis": [
            {"category": row["drug_category"],
             "avg_monthly_usage": float(row["avg_monthly_usage"]),
             "total_usage": row["total_usage"]}
            for row in trend_rows
        ],
        "frequent_patterns": [
            {"category": row["drug_category"], "manufacturer": row["manufacturer_name"],
             "pattern_count": row["pattern_count"], "avg_usage": float(row["avg_pattern_usage"])}
            for row in frequent_patterns.limit(20).collect()
        ],
        "seasonal_analysis": [
            {"season": row["season"], "category": row["drug_category"],
             "usage": row["seasonal_usage"], "avg_price": float(row["seasonal_avg_price"])}
            for row in seasonal_analysis.collect()
        ],
        "anomaly_detection": [
            {"drug_name": row["drug_name"], "usage_count": row["usage_count"],
             "usage_date": row["usage_date"], "price": float(row["drug_price"])}
            for row in anomaly_rows
        ],
        "clustering_summary": {
            "total_samples": sample_count,
            # Guard: .mean() of an empty frame is NaN, which is not valid JSON.
            "avg_price_usage_ratio": float(clustering_features["price_usage_ratio"].mean()) if sample_count else 0.0,
            "avg_reimbursement_score": float(clustering_features["reimbursement_usage_score"].mean()) if sample_count else 0.0,
        },
    }
    return JsonResponse({"code": 200, "data": result_data, "message": "药品数据挖掘分析完成"})
@csrf_exempt
def medical_reimbursement_strategy_analysis(request):
    """Analyze reimbursement records against drug info and policy tables.

    POST body (JSON):
        strategy_type: defaults to 'comprehensive'.
            NOTE(review): read but never used to branch — confirm intent.
        region:        optional region filter (empty string = no filter).

    Returns:
        JsonResponse with reimbursement efficiency, policy effectiveness,
        budget analysis, high-cost drug ranking, monthly budget trend and
        patient-burden statistics.
    """
    if request.method != 'POST':
        # Bug fix: the original returned None for non-POST requests.
        return JsonResponse({"code": 405, "message": "Method Not Allowed"}, status=405)
    data = json.loads(request.body)
    strategy_type = data.get('strategy_type', 'comprehensive')
    region_filter = data.get('region', '')
    # NOTE(review): JDBC credentials are hardcoded — move to Django settings/env.
    reimbursement_df = (spark.read.format("jdbc")
                        .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
                        .option("dbtable", "reimbursement_records")
                        .option("user", "root")
                        .option("password", "password")
                        .load())
    drug_df = (spark.read.format("jdbc")
               .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
               .option("dbtable", "drug_info")
               .option("user", "root")
               .option("password", "password")
               .load())
    policy_df = (spark.read.format("jdbc")
                 .option("url", "jdbc:mysql://localhost:3306/medical_insurance")
                 .option("dbtable", "reimbursement_policy")
                 .option("user", "root")
                 .option("password", "password")
                 .load())
    comprehensive_df = reimbursement_df.join(drug_df, "drug_id", "inner").join(policy_df, "policy_id", "inner")
    if region_filter:
        comprehensive_df = comprehensive_df.filter(col("region") == region_filter)
    comprehensive_df = comprehensive_df.filter(col("reimbursement_amount").isNotNull() & col("original_price").isNotNull())
    reimbursement_efficiency = comprehensive_df.groupBy("drug_category", "reimbursement_ratio").agg(
        sum("reimbursement_amount").alias("total_reimbursement"),
        sum("original_price").alias("total_original"),
        count("record_id").alias("reimbursement_count"),
        avg("reimbursement_amount").alias("avg_reimbursement"),
    ).withColumn(
        "actual_ratio", col("total_reimbursement") / col("total_original")
    ).withColumn(
        "efficiency_score", col("actual_ratio") * col("reimbursement_count") / 1000
    )
    policy_effectiveness = comprehensive_df.groupBy("policy_name", "policy_type").agg(
        sum("reimbursement_amount").alias("policy_total_amount"),
        count("record_id").alias("policy_usage_count"),
        avg("reimbursement_ratio").alias("avg_policy_ratio"),
    ).withColumn("cost_effectiveness", col("policy_total_amount") / col("policy_usage_count"))
    budget_analysis = comprehensive_df.groupBy("drug_category").agg(
        sum("reimbursement_amount").alias("category_budget"),
        count("record_id").alias("category_cases"),
        avg("original_price").alias("avg_original_price"),
    ).withColumn(
        "budget_per_case", col("category_budget") / col("category_cases")
    ).withColumn(
        "savings_ratio", 1 - (col("category_budget") / (col("category_cases") * col("avg_original_price")))
    )
    high_cost_drugs = comprehensive_df.filter(col("original_price") > 500).groupBy("drug_name", "drug_category").agg(
        sum("reimbursement_amount").alias("high_cost_reimbursement"),
        count("record_id").alias("high_cost_cases"),
        avg("reimbursement_ratio").alias("avg_high_cost_ratio"),
    )
    # Month key 'YYYY-MM' comes from the first 7 chars of the date string.
    monthly_budget_trend = comprehensive_df.select(
        "*", col("reimbursement_date").substr(1, 7).alias("month")
    ).groupBy("month").agg(
        sum("reimbursement_amount").alias("monthly_budget"),
        count("record_id").alias("monthly_cases"),
    ).orderBy("month")
    # The original also built low_reimbursement_drugs, strategy_optimization
    # (toPandas) and category_optimization and never used any of them — dead
    # driver-side work, removed.
    patient_burden_analysis = comprehensive_df.withColumn(
        "patient_payment", col("original_price") - col("reimbursement_amount")
    ).groupBy("drug_category").agg(
        avg("patient_payment").alias("avg_patient_burden"),
        max("patient_payment").alias("max_patient_burden"),
        sum("patient_payment").alias("total_patient_burden"),
    )

    def _num(value):
        # Bug fix: Spark division yields NULL when the denominator is 0 (e.g.
        # actual_ratio with total_original == 0); the original float(None)
        # raised a TypeError while serializing. Default to 0.0.
        return float(value) if value is not None else 0.0

    result_data = {
        "reimbursement_efficiency": [
            {"category": row["drug_category"], "ratio": _num(row["reimbursement_ratio"]),
             "total_reimbursement": _num(row["total_reimbursement"]),
             "total_original": _num(row["total_original"]),
             "count": row["reimbursement_count"],
             "actual_ratio": _num(row["actual_ratio"]),
             "efficiency_score": _num(row["efficiency_score"])}
            for row in reimbursement_efficiency.orderBy(desc("efficiency_score")).limit(15).collect()
        ],
        "policy_effectiveness": [
            {"policy_name": row["policy_name"], "policy_type": row["policy_type"],
             "total_amount": _num(row["policy_total_amount"]),
             "usage_count": row["policy_usage_count"],
             "avg_ratio": _num(row["avg_policy_ratio"]),
             "cost_effectiveness": _num(row["cost_effectiveness"])}
            for row in policy_effectiveness.orderBy(desc("cost_effectiveness")).limit(10).collect()
        ],
        "budget_analysis": [
            {"category": row["drug_category"], "total_budget": _num(row["category_budget"]),
             "cases": row["category_cases"], "budget_per_case": _num(row["budget_per_case"]),
             "savings_ratio": _num(row["savings_ratio"])}
            for row in budget_analysis.orderBy(desc("category_budget")).collect()
        ],
        "high_cost_analysis": [
            {"drug_name": row["drug_name"], "category": row["drug_category"],
             "reimbursement": _num(row["high_cost_reimbursement"]),
             "cases": row["high_cost_cases"],
             "avg_ratio": _num(row["avg_high_cost_ratio"])}
            for row in high_cost_drugs.orderBy(desc("high_cost_reimbursement")).limit(10).collect()
        ],
        "monthly_trend": [
            {"month": row["month"], "budget": _num(row["monthly_budget"]),
             "cases": row["monthly_cases"]}
            for row in monthly_budget_trend.collect()
        ],
        "patient_burden": [
            {"category": row["drug_category"], "avg_burden": _num(row["avg_patient_burden"]),
             "max_burden": _num(row["max_patient_burden"]),
             "total_burden": _num(row["total_patient_burden"])}
            for row in patient_burden_analysis.orderBy(desc("avg_patient_burden")).collect()
        ],
    }
    return JsonResponse({"code": 200, "data": result_data, "message": "医保报销策略分析完成"})
北京市医保药品数据分析系统文档展示
💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目