一、个人简介
💖💖作者:计算机编程果茶熊 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 计算机毕业设计选题 💕💕文末获取源码联系计算机编程果茶熊
二、系统介绍
大数据框架:Hadoop+Spark(Hive需要定制修改) 开发语言:Java+Python(两个版本都支持) 数据库:MySQL 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持) 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery
上海餐饮数据可视化分析系统是一个基于大数据技术构建的综合性餐饮行业分析平台,采用Hadoop+Spark大数据框架作为底层数据处理引擎,通过HDFS分布式文件系统存储海量餐饮数据,运用Spark SQL进行高效的数据查询与计算。系统后端采用Django框架构建RESTful API服务,前端基于Vue.js结合ElementUI组件库打造用户界面,通过ECharts图表库实现丰富的数据可视化展示。系统核心功能涵盖餐饮分布分析、消费分析、质量分析、竞争分析以及可视化大屏展示,能够从多维度深入挖掘上海地区餐饮行业的经营状况、消费趋势和市场格局。通过整合Pandas和NumPy等数据分析库,系统能够处理复杂的统计计算和数据建模任务,为餐饮企业决策者、市场研究人员以及政府监管部门提供科学的数据支撑和决策依据,助力上海餐饮行业的数字化转型和智能化发展。
三、视频解说
四、部分功能展示
五、部分代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum as spark_sum, avg, max as spark_max, min as spark_min, when, desc, asc
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
import pandas as pd
import numpy as np
# Module-level Spark session shared by all analysis views below.
# NOTE: the original carried config("spark.some.config.option", "some-value"),
# a placeholder copied from the Spark docs; it is a meaningless no-op key and
# has been dropped.
spark = SparkSession.builder.appName("ShanghaiRestaurantAnalysis").getOrCreate()
def restaurant_distribution_analysis(request):
    """Django view: restaurant distribution statistics for Shanghai.

    Aggregates the ``restaurants`` table by district and by category and
    returns a JSON payload with three keys:

    - ``district_stats``: per-district restaurant count, average rating and
      total monthly revenue;
    - ``category_distribution``: per-category count and average floor area;
    - ``popular_areas``: districts with more than 100 restaurants, densest
      first.

    NOTE(review): the original body also built nine other DataFrames
    (price-range distribution, operating hours, competition density, seasonal
    openings, location scores, rent stats, ...) that were never referenced in
    the response.  Spark transformations are lazy, so they triggered no work
    and produced no output — removed as dead code.
    """

    def _load_table(table):
        # Single place for the JDBC connection options that were previously
        # duplicated per read.
        # SECURITY(review): credentials are hard-coded; they should come from
        # Django settings or the environment — TODO confirm deployment config.
        return (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/restaurant_db")
            .option("dbtable", table)
            .option("user", "root")
            .option("password", "password")
            .load()
        )

    restaurant_df = _load_table("restaurants")

    district_distribution = restaurant_df.groupBy("district").agg(
        count("*").alias("restaurant_count"),
        avg("rating").alias("avg_rating"),
        spark_sum("monthly_revenue").alias("total_revenue"),
    )
    category_distribution = restaurant_df.groupBy("category").agg(
        count("*").alias("count"),
        avg("area_size").alias("avg_area"),
    )
    # Districts with a meaningful restaurant presence, most popular first.
    popular_districts = district_distribution.filter(
        col("restaurant_count") > 100
    ).orderBy(desc("restaurant_count"))

    result_data = {
        "district_stats": district_distribution.toPandas().to_dict('records'),
        "category_distribution": category_distribution.toPandas().to_dict('records'),
        "popular_areas": popular_districts.toPandas().to_dict('records'),
    }
    return JsonResponse(result_data)
def consumption_analysis(request):
    """Django view: consumption-pattern statistics from ``consumption_records``.

    Returns a JSON payload with three keys:

    - ``daily_trends``: per-date total revenue, order count and average order
      value;
    - ``peak_analysis``: per-hour revenue and order frequency, highest-revenue
      hours first;
    - ``customer_insights``: average spending and visit frequency by customer
      age group.

    NOTE(review): the original body also built nine other DataFrames (payment
    methods, seasonal spending, cuisine preference, loyalty scores, weekend vs
    weekday, promotions, group size, ...) — including two extra JDBC reads of
    the ``restaurants`` table used only in joins — none of which reached the
    response.  Spark transformations are lazy, so they were pure dead code and
    have been removed.
    """

    def _load_table(table):
        # Shared JDBC options, previously duplicated per read.
        # SECURITY(review): hard-coded credentials; move to settings/env —
        # TODO confirm deployment config.
        return (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/restaurant_db")
            .option("dbtable", table)
            .option("user", "root")
            .option("password", "password")
            .load()
        )

    consumption_df = _load_table("consumption_records")

    daily_consumption = consumption_df.groupBy("date").agg(
        spark_sum("amount").alias("daily_total"),
        count("*").alias("order_count"),
        avg("amount").alias("avg_order_value"),
    )
    peak_hours = consumption_df.groupBy("hour").agg(
        spark_sum("amount").alias("hourly_revenue"),
        count("*").alias("order_frequency"),
    ).orderBy(desc("hourly_revenue"))
    customer_segments = consumption_df.groupBy("customer_age_group").agg(
        avg("amount").alias("avg_spending"),
        count("*").alias("frequency"),
    )

    result_data = {
        "daily_trends": daily_consumption.toPandas().to_dict('records'),
        "peak_analysis": peak_hours.toPandas().to_dict('records'),
        "customer_insights": customer_segments.toPandas().to_dict('records'),
    }
    return JsonResponse(result_data)
def quality_analysis(request):
    """Django view: service-quality statistics from ``quality_metrics``.

    Returns a JSON payload with three keys:

    - ``quality_distribution``: restaurant count and average overall score per
      rating range;
    - ``service_metrics``: per-district averages for service, food quality and
      environment ratings;
    - ``satisfaction_analysis``: restaurant counts bucketed by overall score
      (excellent >= 4.5, good >= 4.0, average >= 3.5, otherwise poor).

    NOTE(review): the original body also built nine other DataFrames
    (complaints, hygiene, repeat-customer rate with an extra JDBC join,
    safety incidents, review sentiment, staff training, certifications, ...)
    that never reached the response — removed as dead code (Spark
    transformations are lazy, so no behavior changes).  One of them also
    mislabeled ``inspection_date.substr(1, 7)`` — a YYYY-MM month prefix — as
    a "quarter".
    """

    def _load_table(table):
        # Shared JDBC options, previously duplicated per read.
        # SECURITY(review): hard-coded credentials; move to settings/env —
        # TODO confirm deployment config.
        return (
            spark.read.format("jdbc")
            .option("url", "jdbc:mysql://localhost:3306/restaurant_db")
            .option("dbtable", table)
            .option("user", "root")
            .option("password", "password")
            .load()
        )

    quality_df = _load_table("quality_metrics")

    rating_distribution = quality_df.groupBy("rating_range").agg(
        count("*").alias("restaurant_count"),
        avg("overall_score").alias("avg_score"),
    )
    service_quality = quality_df.groupBy("district").agg(
        avg("service_rating").alias("avg_service"),
        avg("food_quality").alias("avg_food"),
        avg("environment_rating").alias("avg_environment"),
    )
    # Bucket restaurants into satisfaction tiers by overall score; `when`
    # chains evaluate top-down, so each tier is a half-open interval.
    customer_satisfaction = quality_df.withColumn(
        "satisfaction_level",
        when(col("overall_score") >= 4.5, "excellent")
        .when(col("overall_score") >= 4.0, "good")
        .when(col("overall_score") >= 3.5, "average")
        .otherwise("poor"),
    ).groupBy("satisfaction_level").agg(count("*").alias("count"))

    result_data = {
        "quality_distribution": rating_distribution.toPandas().to_dict('records'),
        "service_metrics": service_quality.toPandas().to_dict('records'),
        "satisfaction_analysis": customer_satisfaction.toPandas().to_dict('records'),
    }
    return JsonResponse(result_data)
六、部分文档展示
七、END
💕💕文末获取源码联系计算机编程果茶熊