前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
北京高档酒店数据可视化分析系统是一个基于大数据技术的酒店行业分析平台,采用Hadoop+Spark大数据框架处理海量酒店数据,结合Django后端框架和Vue+ElementUI+Echarts前端技术栈构建完整的数据分析解决方案。系统运用Spark SQL、Pandas、NumPy等数据处理工具对北京地区高档酒店的多维度信息进行深度挖掘,通过用户画像特征分析揭示消费者行为模式,通过酒店设施水平分析评估服务质量差异,通过空间分布分析展现酒店地理位置优势,通过价格影响因素分析探索定价策略规律,通过口碑评价分析监测市场反馈趋势。系统将复杂的数据分析结果以直观的可视化大屏形式呈现,为酒店管理者、投资决策者和行业研究人员提供科学的数据支撑和决策参考,实现了从数据采集、存储、处理到可视化展示的全流程自动化分析。
三.系统功能演示
四.系统界面展示
五.系统源码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, when, desc, asc
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans
import pandas as pd
import numpy as np
# Shared SparkSession used by every analysis function in this script.
# Adaptive query execution (AQE) is enabled so Spark can re-optimize
# shuffle partitioning at runtime based on observed data sizes.
spark = SparkSession.builder.appName("BeijingHotelAnalysis").config("spark.sql.adaptive.enabled", "true").getOrCreate()
def user_profile_analysis(user_data_path, booking_data_path):
    """Profile hotel users from raw CSV data.

    Produces three result sets: a K-Means segmentation summary of users by
    booking behavior, a gender/age-group distribution with average spending,
    and a consumption pattern by age group and room-price tier.

    Args:
        user_data_path: path to a users CSV (expects columns user_id,
            age_group, gender, city_level — TODO confirm schema against data).
        booking_data_path: path to a bookings CSV (expects columns user_id,
            booking_id, room_price, total_amount, stay_days).

    Returns:
        Tuple of three lists of Rows:
        (cluster_summary, gender_distribution, consumption_pattern).
    """
    # inferSchema=true makes numeric columns load as numbers instead of
    # strings; without it every aggregation depends on implicit casts and
    # VectorAssembler may fail on string-typed input columns.
    user_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(user_data_path)
    booking_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(booking_data_path)
    # Cache the join: it feeds three independent downstream pipelines, each
    # ending in collect(); without caching the join is recomputed each time.
    user_booking = user_df.join(booking_df, "user_id", "inner").cache()
    # Per-user behavioral features used as clustering input.
    user_features = user_booking.groupBy("user_id", "age_group", "gender", "city_level").agg(
        count("booking_id").alias("booking_count"),
        avg("room_price").alias("avg_price"),
        sum("total_amount").alias("total_spending"),
        avg("stay_days").alias("avg_stay_days")
    )
    feature_cols = ["booking_count", "avg_price", "total_spending", "avg_stay_days"]
    assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
    feature_data = assembler.transform(user_features)
    # NOTE(review): features are unscaled, so the large-magnitude
    # total_spending dominates Euclidean distance; a StandardScaler stage
    # would likely improve cluster quality. Kept as-is to preserve the
    # existing cluster assignments (seed is fixed for reproducibility).
    kmeans = KMeans(k=5, seed=42, featuresCol="features", predictionCol="cluster")
    model = kmeans.fit(feature_data)
    clustered_data = model.transform(feature_data)
    cluster_summary = clustered_data.groupBy("cluster").agg(
        count("user_id").alias("user_count"),
        avg("booking_count").alias("avg_bookings"),
        avg("avg_price").alias("cluster_avg_price"),
        avg("total_spending").alias("cluster_spending")
    ).orderBy("cluster")
    gender_distribution = user_booking.groupBy("gender", "age_group").agg(
        count("user_id").alias("user_count"),
        avg("total_amount").alias("avg_spending")
    ).orderBy("gender", "age_group")
    # Bucket each booking into a price tier (tier labels are user-facing
    # output values and are kept verbatim).
    consumption_pattern = user_booking.withColumn("price_level",
        when(col("room_price") < 500, "经济型")
        .when(col("room_price") < 1000, "中档型")
        .otherwise("豪华型")
    ).groupBy("age_group", "price_level").count().orderBy("age_group", "price_level")
    return cluster_summary.collect(), gender_distribution.collect(), consumption_pattern.collect()
def hotel_facility_analysis(hotel_data_path, facility_data_path):
    """Analyze hotel facility quality by star level.

    Produces per-star-level average facility scores, the distribution of a
    four-tier facility ranking per star level, and a count of the primary
    improvement area among hotels with at least one weak facility.

    Args:
        hotel_data_path: path to a hotels CSV (expects columns hotel_id,
            hotel_name, star_level — TODO confirm schema against data).
        facility_data_path: path to a facilities CSV (expects quality columns
            wifi_quality, gym_quality, pool_quality, restaurant_quality,
            room_service_quality).

    Returns:
        Tuple of three lists of Rows:
        (star_facility_avg, facility_distribution, improvement_suggestions).
    """
    # inferSchema=true so the *_quality columns load as numbers; otherwise
    # avg() and the < comparisons depend on implicit string-to-double casts.
    hotel_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(hotel_data_path)
    facility_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(facility_data_path)
    # Cache the join: it is reused by both the score aggregation and the
    # weak-facility filter below.
    hotel_facility = hotel_df.join(facility_df, "hotel_id", "inner").cache()
    # Average each quality dimension per hotel.
    facility_score = hotel_facility.groupBy("hotel_id", "hotel_name", "star_level").agg(
        avg("wifi_quality").alias("wifi_score"),
        avg("gym_quality").alias("gym_score"),
        avg("pool_quality").alias("pool_score"),
        avg("restaurant_quality").alias("restaurant_score"),
        avg("room_service_quality").alias("service_score")
    )
    # Unweighted mean of the five dimensions as an overall facility score.
    facility_score = facility_score.withColumn("total_facility_score",
        (col("wifi_score") + col("gym_score") + col("pool_score") +
         col("restaurant_score") + col("service_score")) / 5
    )
    star_facility_avg = facility_score.groupBy("star_level").agg(
        avg("wifi_score").alias("avg_wifi"),
        avg("gym_score").alias("avg_gym"),
        avg("pool_score").alias("avg_pool"),
        avg("restaurant_score").alias("avg_restaurant"),
        avg("service_score").alias("avg_service"),
        avg("total_facility_score").alias("avg_total_score")
    ).orderBy("star_level")
    # Four-tier ranking; tier labels are user-facing output values.
    facility_ranking = facility_score.withColumn("facility_rank",
        when(col("total_facility_score") >= 4.5, "优秀")
        .when(col("total_facility_score") >= 3.5, "良好")
        .when(col("total_facility_score") >= 2.5, "一般")
        .otherwise("较差")
    )
    facility_distribution = facility_ranking.groupBy("star_level", "facility_rank").count().orderBy("star_level", "facility_rank")
    # Hotels with at least one facility scoring below 3.0.
    weak_facility_analysis = hotel_facility.select("hotel_id", "hotel_name", "wifi_quality", "gym_quality", "pool_quality", "restaurant_quality", "room_service_quality").filter(
        (col("wifi_quality") < 3.0) | (col("gym_quality") < 3.0) |
        (col("pool_quality") < 3.0) | (col("restaurant_quality") < 3.0) |
        (col("room_service_quality") < 3.0)
    )
    # NOTE(review): the when-chain assigns each hotel to only the FIRST weak
    # area in this fixed order, so a hotel weak in both wifi and gym counts
    # only toward "网络设施". Kept as-is to preserve existing output; an
    # explode over all weak areas would count every weakness.
    improvement_suggestions = weak_facility_analysis.withColumn("improvement_area",
        when(col("wifi_quality") < 3.0, "网络设施")
        .when(col("gym_quality") < 3.0, "健身设施")
        .when(col("pool_quality") < 3.0, "游泳设施")
        .when(col("restaurant_quality") < 3.0, "餐饮服务")
        .otherwise("客房服务")
    ).groupBy("improvement_area").count().orderBy(desc("count"))
    return star_facility_avg.collect(), facility_distribution.collect(), improvement_suggestions.collect()
def price_impact_analysis(hotel_data_path, booking_data_path, location_data_path):
    """Analyze factors influencing hotel room prices.

    Produces five result sets: average price/occupancy by district and subway
    distance, seasonal price trends by star level, a price-to-facility ratio
    by star level, district competition intensity, and occupancy/satisfaction
    by price range.

    Args:
        hotel_data_path: path to a hotels CSV (expects hotel_id, star_level,
            facility_score, service_score — TODO confirm schema against data).
        booking_data_path: path to a bookings CSV (expects booking_id,
            room_price, booking_month, occupancy_rate, customer_satisfaction).
        location_data_path: path to a locations CSV (expects hotel_id,
            district, subway_distance_level).

    Returns:
        Tuple of five lists of Rows:
        (location_price_impact, season_price_trend, facility_price_correlation,
         competition_analysis, price_elasticity).
    """
    # inferSchema=true so room_price/booking_month/etc. load as numbers;
    # otherwise isin([12, 1, 2]) and the < comparisons rely on implicit casts.
    hotel_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(hotel_data_path)
    booking_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(booking_data_path)
    location_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(location_data_path)
    # Cache the triple join: five independent collect() pipelines below all
    # start from it; without caching the joins are recomputed five times.
    price_data = hotel_df.join(booking_df, "hotel_id", "inner").join(location_df, "hotel_id", "inner").cache()
    location_price_impact = price_data.groupBy("district", "subway_distance_level").agg(
        avg("room_price").alias("avg_price"),
        count("booking_id").alias("booking_count"),
        avg("occupancy_rate").alias("avg_occupancy")
    ).orderBy("district", "subway_distance_level")
    # Map booking month to a season label (labels are user-facing output).
    season_price_trend = price_data.withColumn("season",
        when(col("booking_month").isin([12, 1, 2]), "冬季")
        .when(col("booking_month").isin([3, 4, 5]), "春季")
        .when(col("booking_month").isin([6, 7, 8]), "夏季")
        .otherwise("秋季")
    ).groupBy("season", "star_level").agg(
        avg("room_price").alias("seasonal_avg_price"),
        count("booking_id").alias("seasonal_bookings")
    ).orderBy("season", "star_level")
    # Price per unit of facility score, by star level.
    facility_price_correlation = price_data.groupBy("star_level").agg(
        avg("room_price").alias("avg_price"),
        avg("facility_score").alias("avg_facility"),
        avg("service_score").alias("avg_service"),
        count("booking_id").alias("total_bookings")
    ).withColumn("price_facility_ratio", col("avg_price") / col("avg_facility")).orderBy("star_level")
    # NOTE(review): count("hotel_id") here counts joined booking rows, not
    # distinct hotels, so "competition intensity" is really booking volume —
    # countDistinct would measure hotels. Kept as-is to preserve output.
    competition_analysis = price_data.groupBy("district").agg(
        count("hotel_id").alias("hotel_count"),
        avg("room_price").alias("district_avg_price"),
        avg("occupancy_rate").alias("district_occupancy")
    ).withColumn("competition_intensity",
        when(col("hotel_count") >= 20, "激烈")
        .when(col("hotel_count") >= 10, "中等")
        .otherwise("较低")
    ).orderBy(desc("hotel_count"))
    # Occupancy and satisfaction by price tier (tier labels are output values).
    price_elasticity = price_data.withColumn("price_range",
        when(col("room_price") < 800, "低价位")
        .when(col("room_price") < 1500, "中价位")
        .otherwise("高价位")
    ).groupBy("price_range").agg(
        avg("occupancy_rate").alias("avg_occupancy"),
        count("booking_id").alias("booking_volume"),
        avg("customer_satisfaction").alias("avg_satisfaction")
    ).orderBy("price_range")
    return location_price_impact.collect(), season_price_trend.collect(), facility_price_correlation.collect(), competition_analysis.collect(), price_elasticity.collect()
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨