💖💖作者:计算机毕业设计江挽 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
基于大数据的豆瓣电影用户行为与市场趋势分析系统介绍
本系统是一套基于大数据技术的豆瓣电影用户行为与市场趋势分析平台,采用Hadoop分布式存储框架和Spark大数据处理引擎作为核心技术架构。系统后端基于Django框架开发,前端采用Vue+ElementUI+Echarts技术栈构建可视化界面。系统通过HDFS存储海量豆瓣电影数据,利用Spark SQL进行高效数据查询与清洗,结合Pandas和NumPy进行深度数据分析。功能模块涵盖豆瓣电影数据管理、评论情感分析、市场热度分析、电影基础特征分析、质量市场表现分析、用户聚类分析以及用户评分行为分析七大核心板块。系统通过Spark分布式计算能力处理大规模电影评论数据,实现对用户观影偏好的精准挖掘,同时运用机器学习算法对用户群体进行智能聚类,帮助理解不同用户群体的行为特征。通过Echarts图表展示分析结果,直观呈现电影市场趋势变化、用户评分分布规律以及情感倾向等关键指标,为电影市场研究提供数据支撑。
基于大数据的豆瓣电影用户行为与市场趋势分析系统演示视频
基于大数据的豆瓣电影用户行为与市场趋势分析系统演示图片
基于大数据的豆瓣电影用户行为与市场趋势分析系统代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, when, regexp_replace, lower, trim, explode, split, year, month, desc, row_number, concat_ws
from pyspark.sql.window import Window
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.clustering import KMeans
from django.http import JsonResponse
from django.views import View
import pandas as pd
import numpy as np
import json
# Shared Spark session for every analysis view in this module.
# Local-friendly tuning: only 4 shuffle partitions and a 2 GB driver heap.
spark = (
    SparkSession.builder
    .appName("DoubanMovieAnalysis")
    .config("spark.sql.shuffle.partitions", "4")
    .config("spark.driver.memory", "2g")
    .getOrCreate()
)
class SentimentAnalysisView(View):
    """Keyword-based sentiment analysis of Douban movie comments.

    POST body (JSON): {"movie_ids": [...]} — optional filter; an empty or
    missing list analyzes every movie.  Responds with per-movie counts and
    percentage rates of positive / negative / neutral comments, keyed for
    the Echarts front end.
    """

    # Lexicons for the rule-based classifier.  A comment is tagged by which
    # lexicon it matches (presence, not word frequency — see the 0/1 flag
    # columns in post()).
    POSITIVE_WORDS = ["好看", "精彩", "优秀", "喜欢", "推荐", "感动", "震撼", "经典", "完美", "棒", "赞"]
    NEGATIVE_WORDS = ["难看", "垃圾", "无聊", "差", "烂", "失望", "浪费", "糟糕", "恶心", "后悔"]

    @staticmethod
    def _read_table(table_name):
        """Load one MySQL table into a Spark DataFrame over JDBC.

        Extracted because the original inlined this boilerplate twice per
        view; connection settings live in exactly one place now.
        """
        return (spark.read.format("jdbc")
                .option("url", "jdbc:mysql://localhost:3306/douban_movie")
                .option("driver", "com.mysql.cj.jdbc.Driver")
                .option("dbtable", table_name)
                .option("user", "root")
                .option("password", "123456")
                .load())

    def post(self, request):
        """Run the sentiment pipeline and return a JSON summary per movie."""
        # Guard against an empty or malformed request body instead of
        # letting json.loads raise a 500.
        try:
            data = json.loads(request.body) if request.body else {}
        except (ValueError, TypeError):
            data = {}
        movie_ids = data.get('movie_ids', [])
        df = self._read_table("movie_comments")
        if movie_ids:
            df = df.filter(col("movie_id").isin(movie_ids))
        # Keep only CJK characters and ASCII letters (lowercased) so keyword
        # matching is not thrown off by punctuation or digits.
        df = df.withColumn(
            "comment_clean",
            lower(trim(regexp_replace(col("comment_text"), "[^\u4e00-\u9fa5a-zA-Z]", ""))))
        # 0/1 presence flags, not occurrence counts: one lexicon hit tags the
        # comment.  None of the lexicon entries contain regex metacharacters,
        # so a plain "|" join forms a safe alternation pattern.
        df = df.withColumn(
            "positive_count",
            when(col("comment_clean").rlike("|".join(self.POSITIVE_WORDS)), 1).otherwise(0))
        df = df.withColumn(
            "negative_count",
            when(col("comment_clean").rlike("|".join(self.NEGATIVE_WORDS)), 1).otherwise(0))
        df = df.withColumn(
            "sentiment",
            when(col("positive_count") > col("negative_count"), "正面")
            .when(col("positive_count") < col("negative_count"), "负面")
            .otherwise("中性"))
        sentiment_stats = df.groupBy("movie_id", "sentiment").agg(count("*").alias("count"))
        sentiment_pivot = (sentiment_stats.groupBy("movie_id")
                           .pivot("sentiment", ["正面", "负面", "中性"])
                           .sum("count")
                           .fillna(0))
        sentiment_pivot = sentiment_pivot.withColumn(
            "total", col("正面") + col("负面") + col("中性"))
        # total >= 1 by construction (a pivot row only exists for movies with
        # at least one comment), but guard the division anyway.
        for rate_col, src_col in (("positive_rate", "正面"),
                                  ("negative_rate", "负面"),
                                  ("neutral_rate", "中性")):
            sentiment_pivot = sentiment_pivot.withColumn(
                rate_col,
                when(col("total") > 0, col(src_col) / col("total") * 100)
                .otherwise(0)
                .cast("decimal(10,2)"))
        movies = self._read_table("movies").select("id", "movie_name")
        result_df = sentiment_pivot.join(
            movies, sentiment_pivot.movie_id == movies.id, "left")
        result_list = result_df.select(
            "movie_name", "正面", "负面", "中性",
            "positive_rate", "negative_rate", "neutral_rate").collect()
        response_data = [{
            "movie_name": row["movie_name"],
            "positive": int(row["正面"]),
            "negative": int(row["负面"]),
            "neutral": int(row["中性"]),
            "positive_rate": float(row["positive_rate"]),
            "negative_rate": float(row["negative_rate"]),
            "neutral_rate": float(row["neutral_rate"]),
        } for row in result_list]
        return JsonResponse({"code": 200, "message": "情感分析完成", "data": response_data})
class UserClusteringView(View):
    """K-Means clustering of users by their rating behavior.

    POST body (JSON): {"k_clusters": int} (default 3).  Responds with
    per-cluster statistics plus a sample of at most 100 users carrying their
    cluster assignment and favorite movie genre.
    """

    @staticmethod
    def _read_table(table_name):
        """Load one MySQL table into a Spark DataFrame over JDBC.

        Extracted because the original inlined this boilerplate twice per
        view; connection settings live in exactly one place now.
        """
        return (spark.read.format("jdbc")
                .option("url", "jdbc:mysql://localhost:3306/douban_movie")
                .option("driver", "com.mysql.cj.jdbc.Driver")
                .option("dbtable", table_name)
                .option("user", "root")
                .option("password", "123456")
                .load())

    def post(self, request):
        """Cluster users and return cluster stats plus sample assignments."""
        # Guard against an empty or malformed request body instead of
        # letting json.loads raise a 500.
        try:
            data = json.loads(request.body) if request.body else {}
        except (ValueError, TypeError):
            data = {}
        # k comes from an untrusted client: coerce to int and clamp to a sane
        # range so KMeans does not fail on k < 2 or an absurdly large k.
        try:
            k_clusters = int(data.get('k_clusters', 3))
        except (TypeError, ValueError):
            k_clusters = 3
        k_clusters = max(2, min(k_clusters, 20))
        df = self._read_table("user_ratings")
        # Behavioral features per user: activity level, mean score, and the
        # shares of high (>=4) and low (<=2) ratings.
        user_features = df.groupBy("user_id").agg(
            count("*").alias("rating_count"),
            avg("rating_score").alias("avg_rating"),
            avg(when(col("rating_score") >= 4, 1).otherwise(0)).alias("high_rating_ratio"),
            avg(when(col("rating_score") <= 2, 1).otherwise(0)).alias("low_rating_ratio"))
        movies = self._read_table("movies").select("id", "movie_type")
        user_movie_types = df.join(movies, df.movie_id == movies.id, "left")
        # movie_type is a comma-separated genre list; explode to one row per
        # (user, genre) pair, then keep each user's most-rated genre.
        user_movie_types = user_movie_types.withColumn(
            "type_split", explode(split(col("movie_type"), ",")))
        type_preference = user_movie_types.groupBy("user_id", "type_split").agg(
            count("*").alias("type_count"))
        window_spec = Window.partitionBy("user_id").orderBy(desc("type_count"))
        top_type = (type_preference
                    .withColumn("rank", row_number().over(window_spec))
                    .filter(col("rank") == 1)
                    .select("user_id", col("type_split").alias("favorite_type")))
        user_features = user_features.join(top_type, "user_id", "left")
        feature_cols = ["rating_count", "avg_rating", "high_rating_ratio", "low_rating_ratio"]
        assembler = VectorAssembler(inputCols=feature_cols, outputCol="features_raw")
        user_features_vec = assembler.transform(user_features)
        # Standardize so the unbounded rating_count does not dominate the
        # [0, 1] ratio features in the distance metric.
        scaler = StandardScaler(inputCol="features_raw", outputCol="features",
                                withStd=True, withMean=True)
        user_features_scaled = scaler.fit(user_features_vec).transform(user_features_vec)
        kmeans = KMeans(k=k_clusters, seed=42, featuresCol="features", predictionCol="cluster")
        clustered_users = kmeans.fit(user_features_scaled).transform(user_features_scaled)
        cluster_stats = clustered_users.groupBy("cluster").agg(
            count("*").alias("user_count"),
            avg("rating_count").alias("avg_rating_count"),
            avg("avg_rating").alias("cluster_avg_rating"),
            avg("high_rating_ratio").alias("avg_high_ratio"))
        cluster_data = [{
            "cluster_id": int(row["cluster"]),
            "user_count": int(row["user_count"]),
            "avg_rating_count": float(row["avg_rating_count"]),
            "cluster_avg_rating": float(row["cluster_avg_rating"]),
            "avg_high_ratio": float(row["avg_high_ratio"]),
        } for row in cluster_stats.collect()]
        user_cluster_list = clustered_users.select(
            "user_id", "cluster", "rating_count", "avg_rating", "favorite_type").limit(100).collect()
        user_data = [{
            "user_id": int(row["user_id"]),
            "cluster": int(row["cluster"]),
            "rating_count": int(row["rating_count"]),
            "avg_rating": float(row["avg_rating"]),
            "favorite_type": row["favorite_type"],  # may be None for users with no joined genre
        } for row in user_cluster_list]
        return JsonResponse({"code": 200, "message": "用户聚类分析完成",
                             "cluster_stats": cluster_data, "user_samples": user_data})
class MarketHeatAnalysisView(View):
    """Market heat analysis: monthly top-10 movies plus an overall top-20.

    POST body (JSON): {"start_date": ..., "end_date": ...} — optional
    inclusive range filter on rating_time; both must be present for the
    filter to apply.
    """

    def post(self, request):
        """Compute heat rankings and return them as a JSON payload."""
        payload = json.loads(request.body)
        start_date = payload.get('start_date')
        end_date = payload.get('end_date')
        ratings = (spark.read.format("jdbc")
                   .option("url", "jdbc:mysql://localhost:3306/douban_movie")
                   .option("driver", "com.mysql.cj.jdbc.Driver")
                   .option("dbtable", "user_ratings")
                   .option("user", "root")
                   .option("password", "123456")
                   .load())
        if start_date and end_date:
            ratings = ratings.filter(
                (col("rating_time") >= start_date) & (col("rating_time") <= end_date))
        ratings = (ratings
                   .withColumn("rating_year", year(col("rating_time")))
                   .withColumn("rating_month", month(col("rating_time"))))
        # Heat score = 0.7 * volume + 0.3 * (volume * mean score):
        # volume-driven, nudged by rating quality.
        per_month = (ratings
                     .groupBy("movie_id", "rating_year", "rating_month")
                     .agg(count("*").alias("rating_count"),
                          avg("rating_score").alias("avg_score"))
                     .withColumn("heat_score",
                                 (col("rating_count") * 0.7
                                  + col("avg_score") * col("rating_count") * 0.3)
                                 .cast("decimal(10,2)"))
                     .withColumn("time_period",
                                 concat_ws("-", col("rating_year"), col("rating_month"))))
        # NOTE(review): time_period is "YYYY-M" with no zero padding, so the
        # final orderBy sorts it lexicographically ("2023-10" < "2023-2");
        # presumably the front end re-orders periods — confirm before changing
        # the format, since it is part of the response contract.
        movie_info = (spark.read.format("jdbc")
                      .option("url", "jdbc:mysql://localhost:3306/douban_movie")
                      .option("driver", "com.mysql.cj.jdbc.Driver")
                      .option("dbtable", "movies")
                      .option("user", "root")
                      .option("password", "123456")
                      .load()
                      .select("id", "movie_name", "movie_type", "release_date"))
        enriched = per_month.join(movie_info, per_month.movie_id == movie_info.id, "left")
        rank_window = Window.partitionBy("time_period").orderBy(desc("heat_score"))
        ranked = (enriched
                  .withColumn("rank", row_number().over(rank_window))
                  .filter(col("rank") <= 10))
        ordered = (ranked
                   .select("time_period", "movie_name", "movie_type",
                           "rating_count", "avg_score", "heat_score", "rank")
                   .orderBy("time_period", "rank"))
        # Group the flat rows into {period: [top-10 entries]}.
        monthly = {}
        for row in ordered.collect():
            monthly.setdefault(row["time_period"], []).append({
                "movie_name": row["movie_name"],
                "movie_type": row["movie_type"],
                "rating_count": int(row["rating_count"]),
                "avg_score": float(row["avg_score"]),
                "heat_score": float(row["heat_score"]),
                "rank": int(row["rank"]),
            })
        # Overall ranking over the same (possibly date-filtered) ratings,
        # using the same heat formula without the monthly split.
        overall = (ratings
                   .groupBy("movie_id")
                   .agg(count("*").alias("total_ratings"),
                        avg("rating_score").alias("overall_avg_score"))
                   .withColumn("overall_heat",
                               (col("total_ratings") * 0.7
                                + col("overall_avg_score") * col("total_ratings") * 0.3)
                               .cast("decimal(10,2)")))
        top_overall = (overall
                       .join(movie_info, overall.movie_id == movie_info.id, "left")
                       .orderBy(desc("overall_heat"))
                       .limit(20))
        overall_rows = top_overall.select(
            "movie_name", "total_ratings", "overall_avg_score", "overall_heat").collect()
        overall_data = [{
            "movie_name": row["movie_name"],
            "total_ratings": int(row["total_ratings"]),
            "overall_avg_score": float(row["overall_avg_score"]),
            "overall_heat": float(row["overall_heat"]),
        } for row in overall_rows]
        return JsonResponse({"code": 200, "message": "市场热度分析完成",
                             "monthly_heat": monthly, "overall_top": overall_data})
基于大数据的豆瓣电影用户行为与市场趋势分析系统文档展示
💖💖作者:计算机毕业设计江挽 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目