💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
[TOC]
基于大数据的哔哩哔哩热门视频数据可视化分析系统介绍
《基于大数据的哔哩哔哩热门视频数据可视化分析系统》是一套采用现代大数据技术栈构建的综合性视频数据分析平台,该系统充分利用Hadoop分布式存储框架和Spark大数据处理引擎的强大计算能力,对哔哩哔哩平台的海量视频数据进行深度挖掘与智能分析。系统采用前后端分离的架构设计,前端基于Vue框架结合ElementUI组件库构建用户界面,通过Echarts图表库实现丰富的数据可视化效果,后端支持Django和Spring Boot双重技术方案,确保系统的灵活性和扩展性。在数据处理层面,系统运用Spark SQL进行大规模数据查询与计算,结合Pandas和NumPy进行数据预处理和统计分析,将处理结果存储于MySQL数据库中以保证数据的持久化和高效访问。系统核心功能涵盖多维度数据分析模块,包括创作者分析模块深入解析UP主的内容创作规律和影响力分布,用户行为分析模块挖掘观众的观看偏好和互动模式,时间规律分析模块揭示视频热度的时间分布特征,视频内容分析模块识别热门内容的类型和标签趋势,地域分布分析模块展现不同地区用户的观看习惯差异。整个系统通过HDFS分布式文件系统实现海量视频数据的可靠存储,借助Spark的内存计算优势大幅提升数据处理效率,最终以直观易懂的图表和报表形式为用户呈现深度的数据洞察,为理解哔哩哔哩平台的内容生态和用户行为提供了强有力的数据支撑和决策依据。
基于大数据的哔哩哔哩热门视频数据可视化分析系统演示视频
基于大数据的哔哩哔哩热门视频数据可视化分析系统演示图片
基于大数据的哔哩哔哩热门视频数据可视化分析系统代码展示
# Build (or reuse) the shared SparkSession. Adaptive Query Execution is
# enabled so shuffle partitions are coalesced automatically at runtime.
spark = SparkSession.builder.appName("BilibiliVideoAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
# Load the full video_data table from MySQL over JDBC into a DataFrame.
# NOTE(review): credentials are hard-coded here (and again in
# analyze_user_behavior) — move to config/env vars before deployment.
df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/bilibili_db").option("dbtable", "video_data").option("user", "root").option("password", "password").load()
def analyze_video_content():
    """Run the video-content analyses over the module-level `df` DataFrame.

    Returns:
        dict: five JSON-serializable result lists under the keys
        'category_analysis', 'tag_analysis', 'engagement_analysis',
        'duration_analysis' and 'hot_videos'.
    """
    video_df = df.select(
        "video_id", "title", "category", "tags", "view_count",
        "like_count", "comment_count", "duration", "upload_time"
    )
    # Per-category volume and popularity, ordered by total views.
    category_stats = video_df.groupBy("category").agg(
        F.count("video_id").alias("video_count"),
        F.avg("view_count").alias("avg_views"),
        F.avg("like_count").alias("avg_likes"),
        F.sum("view_count").alias("total_views"),
    ).orderBy(F.desc("total_views"))
    # Explode the comma-separated tag string into one row per tag.
    tag_df = video_df.select(
        "video_id",
        F.explode(F.split("tags", ",")).alias("tag"),
        "view_count",
        "like_count",
    )
    # Keep only tags used on at least 10 videos to cut long-tail noise.
    tag_popularity = tag_df.groupBy("tag").agg(
        F.count("video_id").alias("tag_usage_count"),
        F.avg("view_count").alias("avg_tag_views"),
        F.sum("view_count").alias("total_tag_views"),
    ).filter(F.col("tag_usage_count") >= 10).orderBy(F.desc("total_tag_views"))
    # Engagement = (likes + comments) / views as a percentage.
    # Guard view_count > 0: Spark yields null on division by zero, which
    # would otherwise leave meaningless null-engagement rows in the result.
    engagement_rate = video_df.filter(F.col("view_count") > 0).withColumn(
        "engagement_rate",
        (F.col("like_count") + F.col("comment_count")) / F.col("view_count") * 100,
    ).select(
        "video_id", "title", "category", "view_count", "engagement_rate"
    ).orderBy(F.desc("engagement_rate"))
    # Bucket videos by duration (seconds) and compare average performance.
    duration_analysis = video_df.withColumn(
        "duration_category",
        F.when(F.col("duration") < 300, "短视频(5分钟以下)")
        .when(F.col("duration") < 1200, "中等时长(5-20分钟)")
        .otherwise("长视频(20分钟以上)"),
    ).groupBy("duration_category").agg(
        F.count("video_id").alias("count"),
        F.avg("view_count").alias("avg_views"),
        F.avg("like_count").alias("avg_likes"),
    )
    # Weighted hotness score; weights favour raw view counts.
    hot_videos = video_df.withColumn(
        "hot_score",
        F.col("view_count") * 0.6 + F.col("like_count") * 0.3 + F.col("comment_count") * 0.1,
    ).orderBy(F.desc("hot_score")).limit(100)
    return {
        'category_analysis': [row.asDict() for row in category_stats.collect()],
        'tag_analysis': [row.asDict() for row in tag_popularity.limit(50).collect()],
        'engagement_analysis': [row.asDict() for row in engagement_rate.limit(30).collect()],
        'duration_analysis': [row.asDict() for row in duration_analysis.collect()],
        'hot_videos': [row.asDict() for row in hot_videos.collect()],
    }
def analyze_user_behavior():
    """Analyze viewing behavior from the user_behavior table.

    Reads user_behavior over JDBC and joins video durations from the
    module-level `df`. Returns a dict of five JSON-serializable result
    lists: 'user_activity', 'device_analysis', 'interaction_analysis',
    'retention_analysis', 'completion_analysis'.
    """
    # NOTE(review): JDBC credentials duplicated from the module top —
    # consolidate into a shared config.
    user_df = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/bilibili_db").option("dbtable", "user_behavior").option("user", "root").option("password", "password").load()
    behavior_df = user_df.select(
        "user_id", "video_id", "action_type", "watch_duration",
        "watch_time", "device_type", "user_level"
    )
    # Per-user watch statistics over 'watch' actions only.
    watch_pattern = behavior_df.filter(F.col("action_type") == "watch").groupBy("user_id").agg(
        F.count("video_id").alias("total_videos_watched"),
        F.avg("watch_duration").alias("avg_watch_duration"),
        F.sum("watch_duration").alias("total_watch_time"),
        F.countDistinct("video_id").alias("unique_videos"),
    )
    # Bucket users into activity tiers and summarize each tier.
    user_activity = watch_pattern.withColumn(
        "activity_level",
        F.when(F.col("total_videos_watched") >= 100, "高活跃用户")
        .when(F.col("total_videos_watched") >= 30, "中等活跃用户")
        .otherwise("低活跃用户"),
    ).groupBy("activity_level").agg(
        F.count("user_id").alias("user_count"),
        F.avg("avg_watch_duration").alias("avg_duration"),
        F.avg("total_watch_time").alias("avg_total_time"),
    )
    # Which device types drive the most distinct users and actions.
    device_usage = behavior_df.groupBy("device_type").agg(
        F.countDistinct("user_id").alias("unique_users"),
        F.count("video_id").alias("total_actions"),
        F.avg("watch_duration").alias("avg_watch_duration"),
    ).orderBy(F.desc("unique_users"))
    # Interaction volume split by action type.
    interaction_analysis = behavior_df.filter(
        F.col("action_type").isin(["like", "comment", "share"])
    ).groupBy("action_type").agg(
        F.countDistinct("user_id").alias("active_users"),
        F.count("video_id").alias("total_interactions"),
    )
    # Per-user retention window: active days between first and last visit.
    # NOTE(review): this collects one row per user — may be large at scale.
    user_retention = behavior_df.withColumn(
        "watch_date", F.to_date("watch_time")
    ).groupBy("user_id", "watch_date").agg(
        F.count("video_id").alias("daily_videos")
    ).groupBy("user_id").agg(
        F.countDistinct("watch_date").alias("active_days"),
        F.max("watch_date").alias("last_active_date"),
        F.min("watch_date").alias("first_active_date"),
    ).withColumn("retention_days", F.datediff("last_active_date", "first_active_date"))
    # Completion rate = watch_duration / video duration (percent), per
    # user level. Guard duration > 0 so the ratio is never a null from
    # division by zero (such rows were silently dropped by the <= 100
    # filter before; now the intent is explicit).
    completion_rate = behavior_df.join(
        df.select("video_id", "duration"), "video_id"
    ).filter(F.col("duration") > 0).withColumn(
        "completion_rate",
        F.col("watch_duration") / F.col("duration") * 100,
    ).filter(F.col("completion_rate") <= 100).groupBy("user_level").agg(
        F.avg("completion_rate").alias("avg_completion_rate"),
        F.count("user_id").alias("sample_count"),
    )
    return {
        'user_activity': [row.asDict() for row in user_activity.collect()],
        'device_analysis': [row.asDict() for row in device_usage.collect()],
        'interaction_analysis': [row.asDict() for row in interaction_analysis.collect()],
        'retention_analysis': [row.asDict() for row in user_retention.collect()],
        'completion_analysis': [row.asDict() for row in completion_rate.collect()],
    }
def analyze_time_patterns():
    """Analyze temporal upload/performance patterns in the global `df`.

    Returns:
        dict: six JSON-serializable result lists under the keys
        'hourly_pattern', 'daily_pattern', 'monthly_trend', 'peak_hours',
        'category_time_pattern' and 'seasonal_analysis'.
    """
    base = df.select(
        "video_id", "upload_time", "view_count",
        "like_count", "comment_count", "category"
    )
    # Derived once and reused: the hour-of-day each video was uploaded.
    with_hour = base.withColumn("upload_hour", F.hour("upload_time"))

    # Upload volume and popularity per hour of day.
    by_hour = with_hour.groupBy("upload_hour").agg(
        F.count("video_id").alias("video_count"),
        F.avg("view_count").alias("avg_views"),
        F.avg("like_count").alias("avg_likes"),
        F.sum("view_count").alias("total_views"),
    ).orderBy("upload_hour")

    # Same breakdown per day of week (Spark: 1 = Sunday ... 7 = Saturday),
    # with a human-readable day label attached.
    day_label = (
        F.when(F.col("upload_day") == 1, "周日")
        .when(F.col("upload_day") == 2, "周一")
        .when(F.col("upload_day") == 3, "周二")
        .when(F.col("upload_day") == 4, "周三")
        .when(F.col("upload_day") == 5, "周四")
        .when(F.col("upload_day") == 6, "周五")
        .otherwise("周六")
    )
    by_day = base.withColumn("upload_day", F.dayofweek("upload_time")).groupBy("upload_day").agg(
        F.count("video_id").alias("video_count"),
        F.avg("view_count").alias("avg_views"),
        F.avg("like_count").alias("avg_likes"),
        F.sum("view_count").alias("total_views"),
    ).withColumn("day_name", day_label).orderBy("upload_day")

    # Month-over-month upload and view trend.
    by_month = base.withColumn("upload_month", F.month("upload_time")) \
        .withColumn("upload_year", F.year("upload_time")) \
        .groupBy("upload_year", "upload_month").agg(
            F.count("video_id").alias("video_count"),
            F.avg("view_count").alias("avg_views"),
            F.sum("view_count").alias("total_views"),
        ).orderBy("upload_year", "upload_month")

    # Best-performing upload hours by a weighted score; hours with fewer
    # than 10 samples are excluded as statistically unreliable.
    score = F.col("view_count") * 0.5 + F.col("like_count") * 0.3 + F.col("comment_count") * 0.2
    best_hours = with_hour.withColumn("performance_score", score) \
        .groupBy("upload_hour").agg(
            F.avg("performance_score").alias("avg_performance"),
            F.count("video_id").alias("sample_size"),
        ).filter(F.col("sample_size") >= 10).orderBy(F.desc("avg_performance"))

    # Top-3 upload hours per category, ranked by average views.
    per_category_rank = Window.partitionBy("category").orderBy(F.desc("avg_views"))
    category_hours = with_hour.groupBy("category", "upload_hour").agg(
        F.count("video_id").alias("video_count"),
        F.avg("view_count").alias("avg_views"),
    ).withColumn("hour_rank", F.row_number().over(per_category_rank)) \
        .filter(F.col("hour_rank") <= 3)

    # Seasonal aggregation based on upload month.
    season_label = (
        F.when(F.col("upload_month").isin(3, 4, 5), "春季")
        .when(F.col("upload_month").isin(6, 7, 8), "夏季")
        .when(F.col("upload_month").isin(9, 10, 11), "秋季")
        .otherwise("冬季")
    )
    by_season = base.withColumn("upload_month", F.month("upload_time")) \
        .withColumn("season", season_label) \
        .groupBy("season").agg(
            F.count("video_id").alias("video_count"),
            F.avg("view_count").alias("avg_views"),
            F.avg("like_count").alias("avg_likes"),
        )

    return {
        'hourly_pattern': [row.asDict() for row in by_hour.collect()],
        'daily_pattern': [row.asDict() for row in by_day.collect()],
        'monthly_trend': [row.asDict() for row in by_month.collect()],
        'peak_hours': [row.asDict() for row in best_hours.collect()],
        'category_time_pattern': [row.asDict() for row in category_hours.collect()],
        'seasonal_analysis': [row.asDict() for row in by_season.collect()],
    }
基于大数据的哔哩哔哩热门视频数据可视化分析系统文档展示
💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目