前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
广西药店数据可视化分析系统是一个基于大数据技术构建的药店业务分析平台,采用Hadoop+Spark大数据框架作为底层数据处理引擎,结合Python语言开发,运用Django后端框架和Vue+ElementUI+Echarts前端技术栈进行系统构建。该系统能够对广西地区药店经营数据进行深度挖掘和可视化展示,提供品牌市场份额分析、各地市药店数量统计、城市品牌分布情况、医保服务开通状态及关联性分析等核心功能模块。通过Spark SQL进行大规模数据查询处理,结合Pandas和NumPy进行数据科学计算,系统还具备药店热词词云生成、地理位置热力图展示、核心品牌布局分析、城市服务覆盖评估以及区域密度计算等高级分析能力。整个系统以MySQL作为数据存储基础,通过HDFS进行大数据文件管理,为药店行业的经营决策、市场布局优化和政策制定提供了科学的数据支撑和直观的可视化展示平台。
三.系统功能演示
四.系统界面展示
五.系统源码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum, avg, desc, asc, when, regexp_extract, split, explode
from pyspark.ml.feature import Tokenizer, StopWordsRemover
import pandas as pd
import numpy as np
from collections import Counter
# Module-level Spark session shared by every analysis function below.
# Adaptive query execution is enabled so Spark can re-optimize plans and
# coalesce shuffle partitions at runtime.
spark = SparkSession.builder.appName("GuangxiPharmacyAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
def analyze_brand_market_share(pharmacy_data):
    """Analyze brand market share across Guangxi pharmacies.

    Aggregates per-brand revenue and store counts, derives each brand's
    market share, average per-store revenue, a market-position label and a
    growth-potential score, and summarizes market concentration over the
    top 20 brands.

    Args:
        pharmacy_data: rows accepted by spark.createDataFrame; must carry
            at least "brand_name" and "monthly_revenue" columns.

    Returns:
        dict with "brand_analysis" (list of per-brand record dicts for the
        top 20 brands by share, descending) and "market_metrics"
        (market-wide summary dict).
    """
    # Guard: spark.createDataFrame cannot infer a schema from an empty
    # collection, so bail out early with a result of the same shape.
    if not pharmacy_data:
        return {
            "brand_analysis": [],
            "market_metrics": {
                "total_brands": 0,
                "market_concentration": "分散竞争",
                "average_market_share": 0.0,
                "market_leader": None,
            },
        }
    brand_df = spark.createDataFrame(pharmacy_data)
    brand_revenue = brand_df.groupBy("brand_name").agg(
        sum("monthly_revenue").alias("total_revenue"),
        count("*").alias("store_count"),
    )
    total_market_revenue = brand_df.agg(
        sum("monthly_revenue").alias("market_total")
    ).collect()[0]["market_total"]
    market_share_df = (
        brand_revenue
        .withColumn(
            "market_share_percent",
            (col("total_revenue") / total_market_revenue * 100).cast("decimal(10,2)"),
        )
        .withColumn(
            "avg_store_revenue",
            (col("total_revenue") / col("store_count")).cast("decimal(10,2)"),
        )
        .withColumn(
            "market_position",
            when(col("market_share_percent") >= 15, "领导品牌")
            .when(col("market_share_percent") >= 8, "主要品牌")
            .when(col("market_share_percent") >= 3, "挑战品牌")
            .otherwise("跟随品牌"),
        )
    )
    top_brands = market_share_df.orderBy(desc("market_share_percent")).limit(20)
    # CR20 concentration ratio: combined share of the 20 largest brands.
    brand_concentration_ratio = top_brands.agg(
        sum("market_share_percent").alias("cr_ratio")
    ).collect()[0]["cr_ratio"]
    if brand_concentration_ratio > 70:
        competitive_intensity = "高度集中"
    elif brand_concentration_ratio > 40:
        competitive_intensity = "中度集中"
    else:
        competitive_intensity = "分散竞争"
    # NOTE: the original added a "competitive_intensity" column via a when()
    # with no otherwise(); that column was never selected downstream, so the
    # dead computation is dropped — the label is returned only through
    # "market_metrics" below. Output is unchanged.
    brand_growth_potential = top_brands.withColumn(
        "growth_potential_score",
        (col("avg_store_revenue") / col("market_share_percent") * 10).cast("decimal(8,2)"),
    )
    final_analysis = brand_growth_potential.select(
        "brand_name", "total_revenue", "store_count", "market_share_percent",
        "avg_store_revenue", "market_position", "growth_potential_score",
    ).orderBy(desc("market_share_percent"))
    pandas_result = final_analysis.toPandas()
    market_dynamics = {
        "total_brands": brand_df.select("brand_name").distinct().count(),
        "market_concentration": competitive_intensity,
        "average_market_share": float(pandas_result["market_share_percent"].mean()),
        "market_leader": pandas_result.iloc[0]["brand_name"] if len(pandas_result) > 0 else None,
    }
    return {"brand_analysis": pandas_result.to_dict("records"), "market_metrics": market_dynamics}
def analyze_city_pharmacy_distribution(pharmacy_data):
    """Analyze how pharmacies are distributed across Guangxi cities.

    Per city: store count, total registered capital, average store area,
    insurance coverage rate, a weighted development index, and derived
    tier / density / saturation labels plus a growth score.

    Args:
        pharmacy_data: rows accepted by spark.createDataFrame; expected
            columns include "city_name", "registered_capital", "store_area"
            and "medical_insurance_enabled".

    Returns:
        dict with "city_distribution" (per-city records ordered by
        development index, descending) and "regional_overview"
        (cross-city summary dict).
    """
    # Guard: createDataFrame cannot infer a schema from an empty collection,
    # and Series.idxmax() below raises ValueError on an empty frame (the
    # original only guarded "most_developed_city", not "highest_coverage_city").
    if not pharmacy_data:
        return {
            "city_distribution": [],
            "regional_overview": {
                "total_cities": 0,
                "avg_pharmacies_per_city": 0.0,
                "highest_coverage_city": None,
                "most_developed_city": None,
            },
        }
    city_df = spark.createDataFrame(pharmacy_data)
    city_stats = city_df.groupBy("city_name").agg(
        count("*").alias("pharmacy_count"),
        sum("registered_capital").alias("total_capital"),
        avg("store_area").alias("avg_area"),
        # count() over when() with no otherwise() counts only matching rows.
        count(when(col("medical_insurance_enabled") == True, 1)).alias("insurance_enabled_count"),
    )
    city_stats = (
        city_stats
        .withColumn(
            "insurance_coverage_rate",
            (col("insurance_enabled_count") / col("pharmacy_count") * 100).cast("decimal(8,2)"),
        )
        .withColumn(
            "capital_per_pharmacy",
            (col("total_capital") / col("pharmacy_count")).cast("decimal(12,2)"),
        )
    )
    total_pharmacies = city_df.count()
    city_stats = city_stats.withColumn(
        "city_market_share",
        (col("pharmacy_count") / total_pharmacies * 100).cast("decimal(8,2)"),
    )
    # Weighted composite: 40% store count, 30% insurance coverage,
    # 30% capital per store (scaled down by 10,000).
    city_ranking = city_stats.withColumn(
        "development_index",
        (col("pharmacy_count") * 0.4
         + col("insurance_coverage_rate") * 0.3
         + col("capital_per_pharmacy") / 10000 * 0.3).cast("decimal(8,2)"),
    )
    city_classification = city_ranking.withColumn(
        "city_tier",
        when(col("development_index") >= 80, "一线城市")
        .when(col("development_index") >= 50, "二线城市")
        .when(col("development_index") >= 25, "三线城市")
        .otherwise("发展中城市"),
    )
    regional_analysis = city_classification.withColumn(
        "density_category",
        when(col("pharmacy_count") >= 200, "高密度")
        .when(col("pharmacy_count") >= 100, "中密度")
        .when(col("pharmacy_count") >= 50, "低密度")
        .otherwise("稀疏分布"),
    )
    # Cities with insurance coverage below 60% get a 20% growth-score uplift.
    growth_potential = regional_analysis.withColumn(
        "growth_score",
        when(col("insurance_coverage_rate") < 60, col("development_index") * 1.2)
        .otherwise(col("development_index")),
    )
    market_saturation = growth_potential.withColumn(
        "saturation_level",
        when(col("pharmacy_count") / col("avg_area") > 0.5, "饱和")
        .when(col("pharmacy_count") / col("avg_area") > 0.3, "适中")
        .otherwise("不足"),
    )
    comprehensive_ranking = market_saturation.select(
        "city_name", "pharmacy_count", "total_capital", "avg_area",
        "insurance_coverage_rate", "city_market_share", "development_index",
        "city_tier", "density_category", "growth_score", "saturation_level",
    ).orderBy(desc("development_index"))
    pandas_result = comprehensive_ranking.toPandas()
    regional_summary = {
        "total_cities": city_df.select("city_name").distinct().count(),
        "avg_pharmacies_per_city": float(pandas_result["pharmacy_count"].mean()),
        "highest_coverage_city": pandas_result.loc[
            pandas_result["insurance_coverage_rate"].idxmax(), "city_name"
        ],
        "most_developed_city": pandas_result.iloc[0]["city_name"] if len(pandas_result) > 0 else None,
    }
    return {"city_distribution": pandas_result.to_dict("records"), "regional_overview": regional_summary}
def analyze_medical_insurance_services(pharmacy_data):
    """Analyze medical-insurance service coverage per city district.

    Per (city, district): penetration rates for insurance enrollment,
    chronic-disease service and online settlement, a weighted quality
    index, gap/priority/performance labels, and an efficiency ratio.

    Args:
        pharmacy_data: rows accepted by spark.createDataFrame; expected
            columns include "city_name", "district_name" and the boolean
            flags "medical_insurance_enabled", "chronic_disease_service"
            and "online_settlement".

    Returns:
        dict with "insurance_analysis" (per-district records ordered by
        quality index, descending) and "service_benchmarks" (cross-district
        summary dict).
    """
    # Guard: createDataFrame cannot infer a schema from an empty collection.
    if not pharmacy_data:
        return {
            "insurance_analysis": [],
            "service_benchmarks": {
                "avg_insurance_rate": 0.0,
                "top_performing_district": None,
                "service_coverage_variance": 0.0,
                "digital_adoption_rate": 0.0,
            },
        }
    insurance_df = spark.createDataFrame(pharmacy_data)
    service_analysis = insurance_df.groupBy("city_name", "district_name").agg(
        count("*").alias("total_pharmacies"),
        # count() over when() with no otherwise() counts only matching rows.
        count(when(col("medical_insurance_enabled") == True, 1)).alias("insurance_enabled"),
        count(when(col("chronic_disease_service") == True, 1)).alias("chronic_service_count"),
        count(when(col("online_settlement") == True, 1)).alias("online_settlement_count"),
    )
    service_analysis = (
        service_analysis
        .withColumn(
            "insurance_penetration_rate",
            (col("insurance_enabled") / col("total_pharmacies") * 100).cast("decimal(8,2)"),
        )
        .withColumn(
            "chronic_service_rate",
            (col("chronic_service_count") / col("total_pharmacies") * 100).cast("decimal(8,2)"),
        )
        .withColumn(
            "digital_service_rate",
            (col("online_settlement_count") / col("total_pharmacies") * 100).cast("decimal(8,2)"),
        )
    )
    # Weighted quality index: 50% insurance, 30% chronic care, 20% digital.
    service_quality_score = service_analysis.withColumn(
        "service_quality_index",
        (col("insurance_penetration_rate") * 0.5
         + col("chronic_service_rate") * 0.3
         + col("digital_service_rate") * 0.2).cast("decimal(8,2)"),
    )
    service_gaps = service_quality_score.withColumn(
        "service_gap_level",
        when(col("service_quality_index") >= 80, "优秀服务")
        .when(col("service_quality_index") >= 60, "良好服务")
        .when(col("service_quality_index") >= 40, "基础服务")
        .otherwise("服务不足"),
    )
    # NOTE: the original also derived a "service_correlation_factor" column
    # that was never selected into the final result; the dead computation
    # has been removed (output is unchanged).
    service_development_potential = service_gaps.withColumn(
        "improvement_priority",
        when(col("insurance_penetration_rate") < 70, "医保覆盖优先")
        .when(col("chronic_service_rate") < 50, "慢病服务优先")
        .when(col("digital_service_rate") < 30, "数字化优先")
        .otherwise("全面提升"),
    )
    regional_service_comparison = service_development_potential.withColumn(
        "relative_performance",
        when(col("service_quality_index") > 70, "领先区域")
        .when(col("service_quality_index") > 50, "平均水平")
        .otherwise("待提升区域"),
    )
    # Per-store efficiency: chronic care weighted 1.5x, online settlement 2x.
    service_efficiency = regional_service_comparison.withColumn(
        "service_efficiency_ratio",
        (col("insurance_enabled")
         + col("chronic_service_count") * 1.5
         + col("online_settlement_count") * 2) / col("total_pharmacies"),
    )
    final_insurance_analysis = service_efficiency.select(
        "city_name", "district_name", "total_pharmacies", "insurance_enabled",
        "insurance_penetration_rate", "chronic_service_rate", "digital_service_rate",
        "service_quality_index", "service_gap_level", "improvement_priority",
        "relative_performance", "service_efficiency_ratio",
    ).orderBy(desc("service_quality_index"))
    pandas_result = final_insurance_analysis.toPandas()
    national_benchmarks = {
        "avg_insurance_rate": float(pandas_result["insurance_penetration_rate"].mean()),
        "top_performing_district": pandas_result.iloc[0]["district_name"] if len(pandas_result) > 0 else None,
        "service_coverage_variance": float(pandas_result["service_quality_index"].std()),
        "digital_adoption_rate": float(pandas_result["digital_service_rate"].mean()),
    }
    return {"insurance_analysis": pandas_result.to_dict("records"), "service_benchmarks": national_benchmarks}
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨