💖💖 Author: 计算机毕业设计小途 💙💙 About me: I have long worked in computer science training and teaching, which I genuinely enjoy. My main languages are Java, WeChat Mini Programs, Python, Golang, and Android, and my projects span big data, deep learning, websites, mini programs, Android apps, and algorithms. I also take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know some techniques for reducing similarity scores. I like sharing solutions to problems I run into during development and talking shop, so feel free to ask me about anything technical or code-related! 💛💛 A quick note: thank you all for your attention and support! 💜💜 Website projects · Android/Mini Program projects · Big data projects · Deep learning projects
@TOC
Introduction to the Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System
The Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System is a comprehensive data-analysis platform built on a modern big data stack. It uses the Hadoop distributed storage framework as its foundation and the Spark processing engine on top of it to efficiently collect, store, process, and analyze pharmacy data for the Guangxi region. The system follows a front-end/back-end separated architecture: the back end exposes RESTful APIs through Spring Boot, while the front end is built with Vue.js and the ElementUI component library and renders rich visualizations with the Echarts charting library. On the data-processing side, Spark SQL handles large-scale query and analysis workloads, Python's Pandas and NumPy libraries perform data cleaning and statistical computation, and all processed results are stored in MySQL to support fast queries.

The core features cover multi-dimensional pharmacy analytics, including brand market-share analysis, per-city pharmacy count statistics, city-level brand distribution, medical-insurance enrollment status, and medical-insurance correlation analysis. Deeper insights come from pharmacy keyword word clouds, pharmacy distribution heatmaps, core-brand layout analysis, city service-coverage analysis, and core-area density analysis. All results are presented together on a data-dashboard interface, giving pharmacy operators, market planners, and policy makers a sound data foundation for decisions, and illustrating the value of big data technology in the digital transformation of the traditional retail pharmacy sector.
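The introduction above mentions that Pandas and NumPy clean the raw pharmacy records before they land in MySQL. As a rough illustration of what that stage might look like, here is a minimal sketch, not the project's actual cleaning code: the input file pharmacy_raw.csv, the connection string, and the Guangxi bounding-box values are all assumptions, while the column names mirror the pharmacy_info table used in the Spark code further down.

import pandas as pd
import numpy as np
from sqlalchemy import create_engine

# Hypothetical raw export; column names mirror the pharmacy_info table used below.
df = pd.read_csv("pharmacy_raw.csv")

# Drop exact duplicates and rows missing the fields every analysis depends on.
df = df.drop_duplicates().dropna(subset=["name", "brand_name", "city_name"])

# Coerce coordinates to numeric and null out values outside a rough Guangxi bounding box (assumed bounds).
df["longitude"] = pd.to_numeric(df["longitude"], errors="coerce")
df["latitude"] = pd.to_numeric(df["latitude"], errors="coerce")
df.loc[~df["longitude"].between(104.0, 112.5), "longitude"] = np.nan
df.loc[~df["latitude"].between(20.5, 26.5), "latitude"] = np.nan

# Normalize the medical-insurance flag to 0/1.
df["medical_insurance"] = (pd.to_numeric(df["medical_insurance"], errors="coerce").fillna(0) > 0).astype(int)

# Load the cleaned records into MySQL for the Spark jobs to read.
engine = create_engine("mysql+pymysql://root:123456@localhost:3306/pharmacy_db")
df.to_sql("pharmacy_info", engine, if_exists="replace", index=False)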
Demo Video of the Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System
Demo Screenshots of the Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System
Code Showcase for the Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import col, count, countDistinct, avg, when, floor, date_format, year, row_number, sum as spark_sum, max as spark_max, min as spark_min, round as spark_round

spark = SparkSession.builder.appName("GuangxiPharmacyAnalysis").config("spark.sql.adaptive.enabled", "true").getOrCreate()

def load_pharmacy_df():
    # All three analyses read the same pharmacy_info table from MySQL over JDBC.
    return spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/pharmacy_db").option("driver", "com.mysql.cj.jdbc.Driver").option("dbtable", "pharmacy_info").option("user", "root").option("password", "123456").load()
def analyze_brand_market_share():
    pharmacy_df = load_pharmacy_df()
    # Count stores per brand, then turn the counts into market-share percentages.
    brand_count_df = pharmacy_df.groupBy("brand_name").count().withColumnRenamed("count", "pharmacy_count")
    total_count = pharmacy_df.count()
    brand_share_df = brand_count_df.withColumn("market_share", col("pharmacy_count") / total_count * 100).orderBy(col("market_share").desc())
    top_brand_rows = brand_share_df.limit(10).collect()
    top_brand_names = [row.brand_name for row in top_brand_rows]
    # Everything outside the top 10 is folded into a single "other brands" bucket.
    other_brands_count = brand_count_df.filter(~col("brand_name").isin(top_brand_names)).agg(spark_sum("pharmacy_count")).collect()[0][0] or 0
    other_brands_share = other_brands_count / total_count * 100 if total_count else 0
    result_data = []
    for row in top_brand_rows:
        result_data.append({"brand_name": row.brand_name, "pharmacy_count": row.pharmacy_count, "market_share": round(row.market_share, 2)})
    if other_brands_count > 0:
        result_data.append({"brand_name": "其他品牌", "pharmacy_count": other_brands_count, "market_share": round(other_brands_share, 2)})  # "其他品牌" = other brands
    # Monthly store counts per brand; the count column is renamed because Row.count is a tuple method, not the value.
    brand_trend_df = pharmacy_df.filter(col("create_time").isNotNull()).withColumn("year_month", date_format(col("create_time"), "yyyy-MM")).groupBy("brand_name", "year_month").count().withColumnRenamed("count", "monthly_count").orderBy("year_month")
    trend_data = {}
    for row in brand_trend_df.collect():
        trend_data.setdefault(row.brand_name, []).append({"month": row.year_month, "count": row.monthly_count})
    return {"market_share_data": result_data, "trend_data": trend_data, "total_pharmacy_count": total_count}
def analyze_city_pharmacy_distribution():
    pharmacy_df = load_pharmacy_df()
    # Per-city totals, brand variety, and the geographic centroid of all stores.
    city_stats_df = pharmacy_df.groupBy("city_name").agg(count("*").alias("total_count"), countDistinct("brand_name").alias("brand_count"), avg("longitude").alias("avg_longitude"), avg("latitude").alias("avg_latitude"))
    # Stores with medical-insurance billing enabled (medical_insurance == 1).
    medical_insurance_df = pharmacy_df.filter(col("medical_insurance") == 1).groupBy("city_name").count().withColumnRenamed("count", "insurance_count")
    city_complete_df = city_stats_df.join(medical_insurance_df, "city_name", "left").fillna(0, ["insurance_count"])
    city_complete_df = city_complete_df.withColumn("insurance_rate", spark_round(col("insurance_count") / col("total_count") * 100, 2))
    city_ranking_df = city_complete_df.orderBy(col("total_count").desc())
    # District-level density statistics within each city.
    density_df = pharmacy_df.groupBy("city_name", "district_name").count().withColumnRenamed("count", "district_count")
    city_density_df = density_df.groupBy("city_name").agg(spark_max("district_count").alias("max_district_count"), spark_min("district_count").alias("min_district_count"), avg("district_count").alias("avg_district_count"))
    final_result_df = city_ranking_df.join(city_density_df, "city_name", "left")
    result_data = []
    for row in final_result_df.collect():
        city_data = {"city_name": row.city_name, "total_count": row.total_count, "brand_count": row.brand_count, "insurance_count": row.insurance_count, "insurance_rate": row.insurance_rate, "avg_longitude": round(row.avg_longitude, 6), "avg_latitude": round(row.avg_latitude, 6), "max_district_density": row.max_district_count, "min_district_density": row.min_district_count, "avg_district_density": round(row.avg_district_count, 2)}
        result_data.append(city_data)
    # Year-over-year growth in store counts per city; the count column is again renamed to avoid Row.count.
    growth_analysis_df = pharmacy_df.filter(col("create_time").isNotNull()).withColumn("year", year(col("create_time"))).groupBy("city_name", "year").count().withColumnRenamed("count", "yearly_count").orderBy("city_name", "year")
    growth_data = {}
    for row in growth_analysis_df.collect():
        growth_data.setdefault(row.city_name, []).append({"year": row.year, "count": row.yearly_count})
    return {"city_distribution_data": result_data, "growth_trend_data": growth_data}
def analyze_pharmacy_heatmap_data():
    pharmacy_df = load_pharmacy_df()
    # Keep only rows with usable coordinates.
    coordinate_df = pharmacy_df.select("longitude", "latitude", "city_name", "district_name", "brand_name", "medical_insurance").filter((col("longitude").isNotNull()) & (col("latitude").isNotNull()) & (col("longitude") != 0) & (col("latitude") != 0))
    # Snap each pharmacy to a 0.01-degree grid cell for heatmap aggregation.
    grid_size = 0.01
    coordinate_df = coordinate_df.withColumn("grid_lng", floor(col("longitude") / grid_size) * grid_size).withColumn("grid_lat", floor(col("latitude") / grid_size) * grid_size)
    heatmap_df = coordinate_df.groupBy("grid_lng", "grid_lat").agg(count("*").alias("pharmacy_count"), countDistinct("brand_name").alias("brand_diversity"), spark_sum(when(col("medical_insurance") == 1, 1).otherwise(0)).alias("insurance_count"))
    heatmap_df = heatmap_df.withColumn("center_lng", col("grid_lng") + grid_size / 2).withColumn("center_lat", col("grid_lat") + grid_size / 2)
    heatmap_df = heatmap_df.withColumn("density_level", when(col("pharmacy_count") >= 20, "high").when(col("pharmacy_count") >= 10, "medium").when(col("pharmacy_count") >= 5, "low").otherwise("sparse"))
    # The single most common brand inside each grid cell, picked with a window function.
    brand_heatmap_df = coordinate_df.groupBy("grid_lng", "grid_lat", "brand_name").count().withColumnRenamed("count", "brand_count")
    grid_window = Window.partitionBy("grid_lng", "grid_lat").orderBy(col("brand_count").desc())
    dominant_brand_df = brand_heatmap_df.withColumn("rn", row_number().over(grid_window)).filter(col("rn") == 1).select("grid_lng", "grid_lat", "brand_name", "brand_count").withColumnRenamed("brand_name", "dominant_brand").withColumnRenamed("brand_count", "dominant_count")
    final_heatmap_df = heatmap_df.join(dominant_brand_df, ["grid_lng", "grid_lat"], "left")
    # City centroids for positioning city-level markers on the map.
    city_heatmap_df = coordinate_df.groupBy("city_name").agg(avg("longitude").alias("city_center_lng"), avg("latitude").alias("city_center_lat"), count("*").alias("city_total_count"))
    result_heatmap_data = []
    for row in final_heatmap_df.collect():
        result_heatmap_data.append({"longitude": row.center_lng, "latitude": row.center_lat, "value": row.pharmacy_count, "brand_diversity": row.brand_diversity, "insurance_count": row.insurance_count, "density_level": row.density_level, "dominant_brand": row.dominant_brand if row.dominant_brand else "无", "dominant_count": row.dominant_count if row.dominant_count else 0})  # "无" = none
    city_center_data = []
    for row in city_heatmap_df.collect():
        city_center_data.append({"city_name": row.city_name, "center_longitude": round(row.city_center_lng, 6), "center_latitude": round(row.city_center_lat, 6), "total_count": row.city_total_count})
    return {"heatmap_points": result_heatmap_data, "city_centers": city_center_data, "grid_size": grid_size}
Documentation Showcase for the Big Data-Based Guangxi Pharmacy Data Visualization and Analysis System