一、个人简介
- 💖💖作者:计算机编程果茶熊
- 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我!
- 💛💛想说的话:感谢大家的关注与支持!
- 💜💜
- 网站实战项目
- 安卓/小程序实战项目
- 大数据实战项目
- 计算机毕业设计选题
- 💕💕文末获取源码联系计算机编程果茶熊
二、系统介绍
- 大数据框架:Hadoop+Spark(Hive需要定制修改)
- 开发语言:Java+Python(两个版本都支持)
- 数据库:MySQL
- 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持)
- 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery
- 本系统是一套基于大数据技术栈构建的国内旅游景点游客数据分析系统,采用Hadoop分布式存储框架结合Spark内存计算引擎作为核心大数据处理平台,通过HDFS实现海量旅游数据的可靠存储,运用Spark SQL进行高效的数据查询与分析处理。系统支持Python+Django和Java+Spring Boot两种后端技术方案,前端采用Vue+ElementUI构建现代化的用户界面,集成Echarts图表库实现数据的可视化展示,同时运用Pandas和NumPy等Python科学计算库进行深度数据挖掘分析。系统功能涵盖完整的旅游大数据分析链条,包括系统首页展示、个人中心管理、用户权限控制、旅游景点信息维护、景点满意度深度分析、区域旅游市场趋势研究、时序环境影响因素分析、游客消费行为模式挖掘、游客群体画像精准刻画等核心模块,并提供专业的大屏数据展示功能,支持实时监控和多维度数据呈现。整个系统基于MySQL数据库进行数据持久化存储,通过大数据技术实现对国内旅游市场的全方位数据分析,为旅游行业决策提供科学的数据支撑,是一套集数据采集、存储、处理、分析、可视化于一体的综合性旅游大数据分析平台。
三、国内旅游景点游客数据分析系统-视频解说
毕设想用大数据技术却不知从何下手?旅游景点游客数据分析系统完整实现方案|大数据毕业设计
四、国内旅游景点游客数据分析系统-功能展示
五、国内旅游景点游客数据分析系统-代码展示
# Core feature 1: tourist group profile analysis
def analyze_tourist_profile(self, region_id=None, date_range=None):
    """Build a multi-dimensional tourist profile from HDFS visitor records.

    Parameters
    ----------
    region_id : optional
        When truthy, restrict the analysis to rows whose ``region_id`` matches.
    date_range : optional
        When truthy, a ``(start, end)`` pair; ``visit_date`` is filtered
        inclusively between the two values.

    Returns
    -------
    dict
        Keys: ``age_distribution``, ``consumption_analysis``,
        ``region_distribution``, ``attraction_preferences``,
        ``total_analyzed`` (lists of row-dicts plus a record count).
    """
    spark = SparkSession.builder.appName("TouristProfileAnalysis").getOrCreate()
    try:
        # Load raw visitor records from HDFS.
        tourist_df = spark.read.parquet("hdfs://localhost:9000/tourist_data/visitor_records.parquet")
        # Cleaning: plausible age range, known gender values, positive stay duration.
        cleaned_df = tourist_df.filter(col("age").between(18, 80)) \
            .filter(col("gender").isin(["男", "女"])) \
            .filter(col("visit_duration") > 0)
        if region_id:
            cleaned_df = cleaned_df.filter(col("region_id") == region_id)
        if date_range:
            cleaned_df = cleaned_df.filter(col("visit_date").between(date_range[0], date_range[1]))
        # Age-bucket statistics: count, average spend and average stay per bucket.
        age_groups = cleaned_df.withColumn("age_group",
            when(col("age") < 25, "18-24岁")
            .when(col("age") < 35, "25-34岁")
            .when(col("age") < 45, "35-44岁")
            .when(col("age") < 55, "45-54岁")
            .otherwise("55岁以上"))
        age_distribution = age_groups.groupBy("age_group").agg(
            count("*").alias("count"),
            avg("consumption_amount").alias("avg_consumption"),
            avg("visit_duration").alias("avg_duration")
        ).collect()
        # Spending-power tiers, broken down by gender.
        consumption_analysis = cleaned_df.withColumn("consumption_level",
            when(col("consumption_amount") < 200, "低消费")
            .when(col("consumption_amount") < 500, "中等消费")
            .when(col("consumption_amount") < 1000, "高消费")
            .otherwise("超高消费"))
        consumption_stats = consumption_analysis.groupBy("consumption_level", "gender").agg(
            count("*").alias("visitor_count"),
            avg("satisfaction_score").alias("avg_satisfaction")
        ).collect()
        # Top-20 source provinces/cities by visitor volume.
        region_stats = cleaned_df.groupBy("source_province", "source_city").agg(
            count("*").alias("visitor_count"),
            sum("consumption_amount").alias("total_consumption")
        ).orderBy(desc("visitor_count")).limit(20).collect()
        # Attraction preferences: explode the comma-separated attraction list.
        # FIX: a null visited_attractions previously crashed the flatMap task
        # (AttributeError: 'NoneType' has no attribute 'split'); drop such rows.
        preference_df = cleaned_df.filter(col("visited_attractions").isNotNull()) \
            .select("visitor_id", "visited_attractions").rdd.flatMap(
                lambda row: [(attraction.strip(), 1) for attraction in row.visited_attractions.split(",")]
            ).toDF(["attraction", "count"])
        attraction_popularity = preference_df.groupBy("attraction").agg(
            sum("count").alias("total_visits")
        ).orderBy(desc("total_visits")).limit(15).collect()
        profile_result = {
            "age_distribution": [row.asDict() for row in age_distribution],
            "consumption_analysis": [row.asDict() for row in consumption_stats],
            "region_distribution": [row.asDict() for row in region_stats],
            "attraction_preferences": [row.asDict() for row in attraction_popularity],
            "total_analyzed": cleaned_df.count()
        }
        return profile_result
    finally:
        # FIX: stop the session even when an analysis step raises, so a
        # failed request does not leak the SparkSession.
        spark.stop()
# Core feature 2: scenic-spot satisfaction analysis
def analyze_scenic_satisfaction(self, scenic_spot_id=None, time_period=None):
    """Analyze satisfaction reviews joined with visitor records.

    Parameters
    ----------
    scenic_spot_id : optional
        When truthy, restrict reviews to this ``scenic_spot_id``.
    time_period : optional
        When truthy, a ``(start, end)`` pair filtering ``review_date`` inclusively.

    Returns
    -------
    dict
        Keys: ``overall_metrics``, ``dimension_scores``, ``group_comparison``,
        ``time_trend``, ``satisfaction_distribution``. When no reviews match
        the filters, the same keys are returned with zero/empty values
        instead of raising.
    """
    spark = SparkSession.builder.appName("SatisfactionAnalysis").getOrCreate()
    try:
        # Load review and visitor datasets and join on the visitor key.
        review_df = spark.read.parquet("hdfs://localhost:9000/review_data/satisfaction_records.parquet")
        visitor_df = spark.read.parquet("hdfs://localhost:9000/tourist_data/visitor_records.parquet")
        joined_df = review_df.join(visitor_df, "visitor_id", "inner")
        if scenic_spot_id:
            joined_df = joined_df.filter(col("scenic_spot_id") == scenic_spot_id)
        if time_period:
            joined_df = joined_df.filter(col("review_date").between(time_period[0], time_period[1]))
        # Overall satisfaction metrics; score >= 4 counts as positive, <= 2 as negative.
        overall_satisfaction = joined_df.agg(
            avg("satisfaction_score").alias("avg_satisfaction"),
            count("*").alias("total_reviews"),
            sum(when(col("satisfaction_score") >= 4, 1).otherwise(0)).alias("positive_reviews"),
            sum(when(col("satisfaction_score") <= 2, 1).otherwise(0)).alias("negative_reviews")
        ).collect()[0]
        # FIX: with zero matching reviews the original raised ZeroDivisionError
        # on the rate computations and TypeError on float(None). Return an
        # empty result with the same shape instead.
        if not overall_satisfaction.total_reviews:
            return {
                "overall_metrics": {
                    "average_satisfaction": 0.0,
                    "total_reviews": 0,
                    "positive_rate": 0.0,
                    "negative_rate": 0.0
                },
                "dimension_scores": {},
                "group_comparison": [],
                "time_trend": [],
                "satisfaction_distribution": []
            }
        positive_rate = (overall_satisfaction.positive_reviews / overall_satisfaction.total_reviews) * 100
        negative_rate = (overall_satisfaction.negative_reviews / overall_satisfaction.total_reviews) * 100
        # Per-dimension average scores (service, environment, facility, price, transport).
        dimension_satisfaction = joined_df.agg(
            avg("service_score").alias("avg_service"),
            avg("environment_score").alias("avg_environment"),
            avg("facility_score").alias("avg_facility"),
            avg("price_score").alias("avg_price"),
            avg("transportation_score").alias("avg_transportation")
        ).collect()[0]
        # Satisfaction by demographic group; require >= 10 reviews to be significant.
        group_satisfaction = joined_df.groupBy("age_group", "gender").agg(
            avg("satisfaction_score").alias("avg_satisfaction"),
            count("*").alias("review_count")
        ).filter(col("review_count") >= 10).orderBy(desc("avg_satisfaction")).collect()
        # Monthly satisfaction trend.
        time_trend = joined_df.withColumn("review_month",
            date_format(col("review_date"), "yyyy-MM")) \
            .groupBy("review_month").agg(
                avg("satisfaction_score").alias("monthly_satisfaction"),
                count("*").alias("monthly_reviews")
            ).orderBy("review_month").collect()
        # Bucket scores into five satisfaction levels and compute percentages.
        satisfaction_distribution = joined_df.withColumn("satisfaction_level",
            when(col("satisfaction_score") >= 4.5, "非常满意")
            .when(col("satisfaction_score") >= 3.5, "满意")
            .when(col("satisfaction_score") >= 2.5, "一般")
            .when(col("satisfaction_score") >= 1.5, "不满意")
            .otherwise("非常不满意"))
        level_stats = satisfaction_distribution.groupBy("satisfaction_level").agg(
            count("*").alias("count"),
            (count("*") * 100.0 / overall_satisfaction.total_reviews).alias("percentage")
        ).collect()
        satisfaction_result = {
            "overall_metrics": {
                "average_satisfaction": float(overall_satisfaction.avg_satisfaction),
                "total_reviews": overall_satisfaction.total_reviews,
                "positive_rate": round(positive_rate, 2),
                "negative_rate": round(negative_rate, 2)
            },
            "dimension_scores": dimension_satisfaction.asDict(),
            "group_comparison": [row.asDict() for row in group_satisfaction],
            "time_trend": [row.asDict() for row in time_trend],
            "satisfaction_distribution": [row.asDict() for row in level_stats]
        }
        return satisfaction_result
    finally:
        # FIX: guarantee the session is stopped on both success and failure.
        spark.stop()
# Core feature 3: tourist consumption behavior analysis
def analyze_consumption_behavior(self, analysis_period=None, min_consumption=0):
    """Analyze purchase records: category performance, time patterns, RFM
    customer segmentation and seasonal revenue trends.

    Parameters
    ----------
    analysis_period : optional
        When truthy, a ``(start, end)`` pair filtering ``purchase_date`` inclusively.
    min_consumption : int or float
        Minimum ``purchase_amount`` for a transaction to be included.

    Returns
    -------
    dict
        Keys: ``category_performance``, ``time_patterns``,
        ``customer_segments``, ``seasonal_trends``,
        ``total_analyzed_transactions``, ``total_revenue``.
    """
    spark = SparkSession.builder.appName("ConsumptionBehaviorAnalysis").getOrCreate()
    try:
        # Load purchase and visitor data and join on the visitor key.
        consumption_df = spark.read.parquet("hdfs://localhost:9000/consumption_data/purchase_records.parquet")
        visitor_df = spark.read.parquet("hdfs://localhost:9000/tourist_data/visitor_records.parquet")
        merged_df = consumption_df.join(visitor_df, "visitor_id", "inner")
        if analysis_period:
            merged_df = merged_df.filter(col("purchase_date").between(analysis_period[0], analysis_period[1]))
        filtered_df = merged_df.filter(col("purchase_amount") >= min_consumption)
        # Revenue/transaction statistics per product category.
        category_analysis = filtered_df.groupBy("product_category").agg(
            sum("purchase_amount").alias("total_revenue"),
            count("*").alias("transaction_count"),
            avg("purchase_amount").alias("avg_transaction"),
            countDistinct("visitor_id").alias("unique_customers")
        ).orderBy(desc("total_revenue")).collect()
        # Revenue by hour-of-day and day-of-week.
        time_pattern = filtered_df.withColumn("purchase_hour", hour(col("purchase_timestamp"))) \
            .withColumn("purchase_weekday", dayofweek(col("purchase_date"))) \
            .groupBy("purchase_hour", "purchase_weekday").agg(
                sum("purchase_amount").alias("hourly_revenue"),
                count("*").alias("hourly_transactions")
            ).collect()
        # RFM model: recency (days since last purchase, relative to "now" at
        # call time — results vary with the wall clock), frequency, monetary.
        current_date = datetime.now()
        rfm_df = filtered_df.groupBy("visitor_id").agg(
            max("purchase_date").alias("last_purchase_date"),
            count("*").alias("frequency"),
            sum("purchase_amount").alias("monetary")
        ).withColumn("recency",
            datediff(lit(current_date), col("last_purchase_date")))
        # Score each RFM dimension on a 1-5 scale with fixed thresholds.
        rfm_scored = rfm_df.withColumn("r_score",
            when(col("recency") <= 30, 5)
            .when(col("recency") <= 60, 4)
            .when(col("recency") <= 90, 3)
            .when(col("recency") <= 180, 2)
            .otherwise(1)) \
            .withColumn("f_score",
                when(col("frequency") >= 10, 5)
                .when(col("frequency") >= 7, 4)
                .when(col("frequency") >= 5, 3)
                .when(col("frequency") >= 3, 2)
                .otherwise(1)) \
            .withColumn("m_score",
                when(col("monetary") >= 2000, 5)
                .when(col("monetary") >= 1000, 4)
                .when(col("monetary") >= 500, 3)
                .when(col("monetary") >= 200, 2)
                .otherwise(1))
        # Segment customers by RFM score combinations.
        customer_segments = rfm_scored.withColumn("customer_segment",
            when((col("r_score") >= 4) & (col("f_score") >= 4) & (col("m_score") >= 4), "高价值客户")
            .when((col("r_score") >= 3) & (col("f_score") >= 3), "重要客户")
            .when((col("r_score") >= 4) & (col("f_score") <= 2), "新客户")
            .when((col("r_score") <= 2) & (col("f_score") >= 3), "流失预警客户")
            .otherwise("普通客户"))
        segment_stats = customer_segments.groupBy("customer_segment").agg(
            count("*").alias("customer_count"),
            avg("monetary").alias("avg_spending"),
            sum("monetary").alias("total_contribution")
        ).collect()
        # FIX: the original built a "product_combinations" DataFrame
        # (collect_list of categories per visitor/day) that was never
        # collected or returned — a dead lazy plan; removed. Reintroduce it
        # here if basket/association analysis is actually needed.
        # Seasonal revenue trend (spring = Mar-May, etc.).
        seasonal_trend = filtered_df.withColumn("season",
            when(month(col("purchase_date")).isin(3, 4, 5), "春季")
            .when(month(col("purchase_date")).isin(6, 7, 8), "夏季")
            .when(month(col("purchase_date")).isin(9, 10, 11), "秋季")
            .otherwise("冬季")) \
            .groupBy("season").agg(
                sum("purchase_amount").alias("seasonal_revenue"),
                avg("purchase_amount").alias("seasonal_avg"),
                count("*").alias("seasonal_transactions")
            ).collect()
        # FIX: Spark's sum over an empty set yields null/None; coerce to 0.0
        # so callers always receive a number for total_revenue.
        total_revenue = filtered_df.agg(sum("purchase_amount")).collect()[0][0] or 0.0
        consumption_result = {
            "category_performance": [row.asDict() for row in category_analysis],
            "time_patterns": [row.asDict() for row in time_pattern],
            "customer_segments": [row.asDict() for row in segment_stats],
            "seasonal_trends": [row.asDict() for row in seasonal_trend],
            "total_analyzed_transactions": filtered_df.count(),
            "total_revenue": total_revenue
        }
        return consumption_result
    finally:
        # FIX: guarantee the session is stopped on both success and failure.
        spark.stop()
六、国内旅游景点游客数据分析系统-文档展示
七、END
💕💕文末获取源码联系计算机编程果茶熊