一、About the Author
💖💖Author: 计算机编程果茶熊 💙💙About me: I spent years in computer science training and teaching as a programming instructor, and I genuinely enjoy teaching. I work across Java, WeChat Mini Programs, Python, Golang, Android, and several other IT stacks. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know some techniques for lowering plagiarism-check similarity scores. I like sharing fixes for problems I run into during development and talking shop about technology, so feel free to bring me your code questions! 💛💛A word of thanks: I appreciate everyone's follows and support! 💜💜 Web projects · Android/Mini Program projects · Big-data projects · CS graduation project topic ideas 💕💕To get the source code, contact 计算机编程果茶熊 (see the end of this post)
二、System Overview
Big-data framework: Hadoop + Spark (Hive supported with custom modification)
Languages: Java + Python (both versions supported)
Database: MySQL
Backend: SpringBoot (Spring + SpringMVC + MyBatis) + Django (both versions supported)
Frontend: Vue + Echarts + HTML + CSS + JavaScript + jQuery
The Beijing Tourist Attraction Visualization and Analysis System is a big-data platform that uses the Hadoop + Spark distributed computing stack to mine and visualize data on tourist attractions across Beijing. The backend is built on Django; the frontend uses Vue + ElementUI + Echarts for the interactive interface; structured data is stored in MySQL, while HDFS manages the large raw tourism datasets. The platform integrates four core modules: popularity and reputation analysis, spending and cost analysis, spatial and geographic distribution analysis, and theme and feature analysis, giving users a comprehensive view of Beijing's attractions. Spark SQL handles query processing at scale, Pandas and NumPy handle data cleaning and statistics, and Echarts components render the results as multi-dimensional visualizations, helping tourism authorities, attraction operators, and travelers understand the current state and trends of Beijing's tourism market and supporting data-driven decisions.
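Since the three analysis views shown in Section 五 are plain Django function views that return JsonResponse, wiring them into the app takes a few lines of urls.py. The sketch below is illustrative only: the `views` module path and the URL patterns are assumptions for this post, not the project's actual layout.

from django.urls import path
from . import views  # hypothetical module holding the three analysis views

urlpatterns = [
    # one GET endpoint per analysis module; paths and names are illustrative
    path("api/hotness/", views.scenic_hotness_analysis, name="scenic-hotness"),
    path("api/cost/", views.consumption_cost_analysis, name="consumption-cost"),
    path("api/geo/", views.spatial_geographic_analysis, name="spatial-geo"),
]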
三、Video Walkthrough
四、Feature Showcase (Excerpt)
五、Code Showcase (Excerpt)
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, first, desc, when
from django.http import JsonResponse

# Single shared SparkSession for all analysis views; getOrCreate() reuses the
# running session instead of starting a new one per request.
spark = SparkSession.builder.appName("BeijingTourismAnalysis").getOrCreate()
def scenic_hotness_analysis(request):
    # Load attraction and comment data from HDFS; CSV columns arrive as strings.
    scenic_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/scenic_data.csv")
    comment_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/comment_data.csv")
    scenic_df = scenic_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    scenic_df = scenic_df.withColumn("visit_count", col("visit_count").cast("integer"))
    comment_df = comment_df.withColumn("rating", col("rating").cast("float"))
    comment_df = comment_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    # Join each attraction to its comments and aggregate per attraction.
    # first("visit_count") is deliberate: the join repeats the per-attraction
    # visit_count once per comment, so sum() would inflate it.
    hotness_score = scenic_df.join(comment_df, "scenic_id", "left") \
        .groupBy("scenic_id", "scenic_name") \
        .agg(count("comment_id").alias("comment_count"),
             avg("rating").alias("avg_rating"),
             first("visit_count").alias("total_visits"))
    # Weighted popularity index combining comment volume, rating, and visits.
    hotness_score = hotness_score.withColumn("hotness_index",
        (col("comment_count") * 0.3 + col("avg_rating") * 20 + col("total_visits") * 0.0001))
    hotness_ranked = hotness_score.orderBy(desc("hotness_index")).limit(20)
    # Bucket comments into positive / neutral / negative by star rating.
    sentiment_analysis = comment_df.withColumn("sentiment_score",
        when(col("rating") >= 4.0, 1)
        .when(col("rating") >= 3.0, 0)
        .otherwise(-1))
    # count(when(...)) only counts rows where the condition is non-null.
    sentiment_summary = sentiment_analysis.groupBy("scenic_id") \
        .agg(count(when(col("sentiment_score") == 1, 1)).alias("positive_count"),
             count(when(col("sentiment_score") == 0, 1)).alias("neutral_count"),
             count(when(col("sentiment_score") == -1, 1)).alias("negative_count"))
    final_result = hotness_ranked.join(sentiment_summary, "scenic_id", "left")
    analysis_data = final_result.toPandas().to_dict('records')
    return JsonResponse({"status": "success", "data": analysis_data})
def consumption_cost_analysis(request):
    ticket_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/ticket_data.csv")
    expense_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/expense_data.csv")
    ticket_df = ticket_df.withColumn("price", col("price").cast("float"))
    ticket_df = ticket_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    expense_df = expense_df.withColumn("amount", col("amount").cast("float"))
    expense_df = expense_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    # Average ticket price and number of ticket types per attraction.
    price_stats = ticket_df.groupBy("scenic_id", "scenic_name") \
        .agg(avg("price").alias("avg_ticket_price"),
             count("*").alias("ticket_types"))
    expense_stats = expense_df.groupBy("scenic_id", "expense_type") \
        .agg(avg("amount").alias("avg_expense"),
             sum("amount").alias("total_expense"))
    # Pivot expense types into columns (one column per expense_type value).
    expense_pivot = expense_stats.groupBy("scenic_id") \
        .pivot("expense_type") \
        .agg(avg("avg_expense"))
    cost_analysis = price_stats.join(expense_pivot, "scenic_id", "left")
    cost_analysis = cost_analysis.fillna(0)
    # Assumes expense_type contains at least "food", "transport", and "shopping".
    total_cost_per_visitor = cost_analysis.withColumn("estimated_total_cost",
        col("avg_ticket_price") +
        col("food").cast("float") +
        col("transport").cast("float") +
        col("shopping").cast("float"))
    # Tier attractions by estimated per-visitor cost:
    # 经济型 (budget), 中档型 (mid-range), 高端型 (high-end).
    cost_segments = total_cost_per_visitor.withColumn("cost_level",
        when(col("estimated_total_cost") <= 100, "经济型")
        .when(col("estimated_total_cost") <= 300, "中档型")
        .otherwise("高端型"))
    cost_distribution = cost_segments.groupBy("cost_level").count()
    # Average ticket price by attraction type; assumes ticket_data.csv
    # carries a scenic_type column.
    price_trends = ticket_df.groupBy("scenic_type") \
        .agg(avg("price").alias("type_avg_price")) \
        .orderBy(desc("type_avg_price"))
    result_data = {
        "cost_analysis": cost_analysis.toPandas().to_dict('records'),
        "cost_distribution": cost_distribution.toPandas().to_dict('records'),
        "price_trends": price_trends.toPandas().to_dict('records')
    }
    return JsonResponse({"status": "success", "data": result_data})
def spatial_geographic_analysis(request):
    location_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/location_data.csv")
    visitor_df = spark.read.option("header", "true").csv("hdfs://localhost:9000/tourism/visitor_flow.csv")
    location_df = location_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    location_df = location_df.withColumn("latitude", col("latitude").cast("float"))
    location_df = location_df.withColumn("longitude", col("longitude").cast("float"))
    visitor_df = visitor_df.withColumn("visitor_count", col("visitor_count").cast("integer"))
    visitor_df = visitor_df.withColumn("scenic_id", col("scenic_id").cast("integer"))
    # Map coordinates to districts with bounding boxes: 朝阳区 (Chaoyang),
    # 海淀区 (Haidian), 东城区 (Dongcheng), 其他区域 (other). Note the parentheses
    # around each comparison: in Python, & binds tighter than >=, so the
    # unparenthesized form would not evaluate as intended.
    district_mapping = location_df.withColumn("district",
        when((col("latitude") >= 39.9) & (col("latitude") <= 40.0) &
             (col("longitude") >= 116.3) & (col("longitude") <= 116.5), "朝阳区")
        .when((col("latitude") >= 39.8) & (col("latitude") <= 39.95) &
              (col("longitude") >= 116.2) & (col("longitude") <= 116.4), "海淀区")
        .when((col("latitude") >= 39.85) & (col("latitude") <= 39.95) &
              (col("longitude") >= 116.35) & (col("longitude") <= 116.45), "东城区")
        .otherwise("其他区域"))
    geo_visitor_data = district_mapping.join(visitor_df, "scenic_id", "inner")
    district_stats = geo_visitor_data.groupBy("district") \
        .agg(count("scenic_id").alias("scenic_count"),
             sum("visitor_count").alias("total_visitors"),
             avg("visitor_count").alias("avg_visitors_per_scenic"))
    density_analysis = district_stats.withColumn("visitor_density",
        col("total_visitors") / col("scenic_count"))
    # Attractions with more than 10,000 recorded visitors count as hotspots.
    hotspot_identification = geo_visitor_data.filter(col("visitor_count") > 10000) \
        .select("scenic_name", "district", "latitude", "longitude", "visitor_count") \
        .orderBy(desc("visitor_count"))
    # Simple rule-based spatial bucketing into three clusters by lat/lng thresholds.
    clustering_result = location_df.withColumn("cluster_id",
        when((col("latitude") >= 39.9) & (col("longitude") >= 116.4), 1)
        .when((col("latitude") >= 39.85) & (col("longitude") <= 116.35), 2)
        .otherwise(3))
    cluster_summary = clustering_result.groupBy("cluster_id") \
        .agg(count("scenic_id").alias("cluster_size"),
             avg("latitude").alias("center_lat"),
             avg("longitude").alias("center_lng"))
    geographic_data = {
        "district_stats": district_stats.toPandas().to_dict('records'),
        "density_analysis": density_analysis.toPandas().to_dict('records'),
        "hotspots": hotspot_identification.toPandas().to_dict('records'),
        "clusters": cluster_summary.toPandas().to_dict('records')
    }
    return JsonResponse({"status": "success", "data": geographic_data})
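As a quick sanity check, these endpoints can be exercised with Django's built-in test client. The sketch below is a smoke test, not part of the original project: it assumes the hypothetical /api/hotness/ route from the urls.py sketch in Section 二 and a reachable HDFS/Spark backend.

from django.test import TestCase, Client

class HotnessEndpointSmokeTest(TestCase):
    def test_returns_top_20(self):
        # hits the hypothetical route wired up in the urls.py sketch
        response = Client().get("/api/hotness/")
        payload = response.json()
        self.assertEqual(payload["status"], "success")
        # scenic_hotness_analysis caps the ranking at 20 rows via limit(20)
        self.assertLessEqual(len(payload["data"]), 20)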
六、Documentation Showcase (Excerpt)
七、END
💕💕To get the source code, contact 计算机编程果茶熊