【大数据】豆瓣电影数据可视化分析系统 计算机毕业设计项目 Hadoop+Spark环境配置 数据科学与大数据技术 附源码+文档+讲解

阅读量 44 · 预计阅读时长 5 分钟

一、个人简介

💖💖作者:计算机编程果茶熊 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 计算机毕业设计选题 💕💕文末获取源码联系计算机编程果茶熊

二、系统介绍

大数据框架:Hadoop+Spark(Hive需要定制修改) 开发语言:Java+Python(两个版本都支持) 数据库:MySQL 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持) 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery

豆瓣电影数据可视化分析系统是一个基于大数据技术栈构建的电影数据分析平台,采用Hadoop+Spark作为核心大数据处理框架,结合Python开发语言和Django后端框架,前端使用Vue+ElementUI+Echarts技术栈实现数据可视化展示。系统利用HDFS进行分布式数据存储,通过Spark SQL进行高效的数据查询和分析,结合Pandas和NumPy进行数据处理和统计计算,MySQL作为关系型数据库存储结构化数据。系统提供完整的用户管理功能,支持豆瓣电影数据的采集、存储和管理,实现电影评分、类型、地区、时间等多维度分析,深入挖掘用户参与度和电影质量相关指标,最终通过可视化大屏直观展示分析结果,为电影行业的数据驱动决策提供技术支持。

三、视频解说

豆瓣电影数据可视化分析系统

四、部分功能展示

在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述

五、部分代码展示



from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import pandas as pd
import numpy as np
from django.db import models
from django.contrib.auth.models import User
from django.http import JsonResponse
import json
import mysql.connector

# Module-level SparkSession shared by all analysis views below.
# Adaptive Query Execution (AQE) and shuffle-partition coalescing are
# enabled so Spark can right-size partitions for the join/groupBy-heavy
# queries in these handlers.
spark = SparkSession.builder.appName("DoubanMovieAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()

def movie_rating_analysis(request):
    """Aggregate Douban movie rating statistics and return them as JSON.

    Loads movie metadata and user ratings from HDFS, joins them on
    ``movie_id``, then computes per-movie rating statistics, the overall
    rating distribution, quality tiers, top-rated and most controversial
    movie rankings, and a monthly rating trend.

    Args:
        request: Django HttpRequest (used only for routing; no params read).

    Returns:
        JsonResponse with keys ``rating_distribution``, ``top_movies``,
        ``controversial_movies``, ``monthly_trends`` and ``overall_stats``.
    """
    movie_data = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/movies.csv")
    user_ratings = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/ratings.csv")
    # Cache the join: it feeds four independent actions below (toPandas /
    # collect); without caching Spark re-reads the CSVs and redoes the
    # join for every one of them.
    joined_data = movie_data.join(user_ratings, "movie_id", "inner").cache()
    rating_stats = joined_data.groupBy("movie_id", "movie_name").agg(
        avg("rating").alias("avg_rating"),
        count("rating").alias("rating_count"),
        stddev("rating").alias("rating_std"),
        min("rating").alias("min_rating"),
        max("rating").alias("max_rating")
    )
    rating_distribution = joined_data.groupBy("rating").count().orderBy("rating")
    # Quality tiers by average rating; the labels are user-facing Chinese
    # strings rendered verbatim by the frontend — do not translate.
    quality_analysis = rating_stats.withColumn("quality_score",
        when(col("avg_rating") >= 8.0, "优秀")
        .when(col("avg_rating") >= 7.0, "良好")
        .when(col("avg_rating") >= 6.0, "一般")
        .otherwise("较差")
    )
    # Rank only movies with enough votes for the average to be meaningful.
    trending_movies = quality_analysis.filter(col("rating_count") >= 1000).orderBy(desc("avg_rating")).limit(50)
    # Coefficient of variation (std / mean) as a "controversy" proxy:
    # a large spread relative to the mean indicates divided opinions.
    rating_variance = rating_stats.withColumn("controversy_index", col("rating_std") / col("avg_rating"))
    controversial_movies = rating_variance.filter(col("rating_count") >= 500).orderBy(desc("controversy_index")).limit(20)
    # Order by month so the frontend chart gets a chronological series
    # (consistent with engagement_trends in user_engagement_analysis).
    # NOTE(review): assumes ratings.csv carries a rating_date column — confirm schema.
    monthly_trends = joined_data.withColumn("rating_month", date_format(col("rating_date"), "yyyy-MM")).groupBy("rating_month").agg(avg("rating").alias("monthly_avg")).orderBy("rating_month")
    result_data = {
        "rating_distribution": rating_distribution.toPandas().to_dict('records'),
        "top_movies": trending_movies.toPandas().to_dict('records'),
        "controversial_movies": controversial_movies.toPandas().to_dict('records'),
        "monthly_trends": monthly_trends.toPandas().to_dict('records'),
        "overall_stats": rating_stats.agg(avg("avg_rating"), avg("rating_count")).collect()[0].asDict()
    }
    # Free the cached join once all actions have materialized.
    joined_data.unpersist()
    return JsonResponse(result_data, safe=False)

def movie_category_analysis(request):
    """Analyze Douban movies by genre and return the results as JSON.

    Joins the genre table with the ratings table on ``movie_id`` and
    derives genre popularity, yearly genre evolution, multi-genre combo
    performance, seasonal release patterns, and a weighted genre
    popularity ranking.

    Args:
        request: Django HttpRequest (used only for routing; no params read).

    Returns:
        JsonResponse with keys ``genre_popularity``, ``genre_evolution``,
        ``multi_genre_analysis``, ``seasonal_trends`` and
        ``performance_ranking``.
    """
    movie_genres = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/movie_genres.csv")
    ratings_data = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/ratings.csv")
    # Cache the join: it feeds several independent aggregations below, each
    # of which would otherwise rescan the CSVs and redo the join.
    genre_ratings = movie_genres.join(ratings_data, "movie_id", "inner").cache()
    # NOTE(review): assumes ratings.csv exposes box_office and release_date
    # columns — confirm against the actual HDFS schema.
    genre_popularity = genre_ratings.groupBy("genre").agg(
        count("movie_id").alias("movie_count"),
        countDistinct("user_id").alias("user_count"),
        avg("rating").alias("avg_rating"),
        sum("box_office").alias("total_box_office")
    )
    genre_evolution = genre_ratings.withColumn("release_year", year(col("release_date"))).groupBy("genre", "release_year").agg(
        count("movie_id").alias("yearly_count"),
        avg("rating").alias("yearly_rating")
    ).orderBy("release_year")
    # Movies tagged with more than one genre: rate each genre combination.
    genre_combinations = movie_genres.groupBy("movie_id").agg(collect_list("genre").alias("genres")).withColumn("genre_count", size(col("genres")))
    multi_genre_analysis = genre_combinations.filter(col("genre_count") > 1).join(ratings_data, "movie_id").groupBy("genres").agg(avg("rating").alias("combo_rating"), count("*").alias("combo_count"))
    # (Removed dead code: user_genre_preferences / genre_diversity were
    # computed here but never included in the response.)
    seasonal_trends = genre_ratings.withColumn("release_month", month(col("release_date"))).groupBy("genre", "release_month").agg(count("movie_id").alias("monthly_releases"))
    # Weighted popularity index: 30% volume, 40% audience reach, 30% rating.
    genre_performance_matrix = genre_popularity.withColumn("popularity_index",
        (col("movie_count") * 0.3 + col("user_count") * 0.4 + col("avg_rating") * 0.3)
    ).orderBy(desc("popularity_index"))
    result_data = {
        "genre_popularity": genre_popularity.toPandas().to_dict('records'),
        "genre_evolution": genre_evolution.toPandas().to_dict('records'),
        "multi_genre_analysis": multi_genre_analysis.toPandas().to_dict('records'),
        "seasonal_trends": seasonal_trends.toPandas().to_dict('records'),
        "performance_ranking": genre_performance_matrix.toPandas().to_dict('records')
    }
    # Free the cached join once all actions have materialized.
    genre_ratings.unpersist()
    return JsonResponse(result_data, safe=False)

def user_engagement_analysis(request):
    """Analyze user engagement on the platform and return the results as JSON.

    Computes per-user engagement metrics from the activity log, segments
    users by activity volume, derives hourly/weekday activity patterns,
    monthly retention cohorts, demographic content preferences, weekly
    engagement trends and a simple inactivity-based churn list.

    Args:
        request: Django HttpRequest (used only for routing; no params read).

    Returns:
        JsonResponse with keys ``user_segments``, ``activity_patterns``,
        ``retention_analysis``, ``demographic_preferences``,
        ``engagement_trends`` and ``churn_risks``.
    """
    # Cache the activity log: it feeds five independent pipelines below.
    user_activities = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/user_activities.csv").cache()
    user_profiles = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("hdfs://localhost:9000/douban/user_profiles.csv")
    # NOTE(review): assumes user_activities.csv has rating_count,
    # review_count, comment_count, session_duration, activity_time and
    # last_activity_date columns — confirm against the HDFS schema.
    engagement_metrics = user_activities.groupBy("user_id").agg(
        count("activity_id").alias("total_activities"),
        countDistinct("movie_id").alias("unique_movies"),
        sum("rating_count").alias("total_ratings"),
        sum("review_count").alias("total_reviews"),
        sum("comment_count").alias("total_comments"),
        avg("session_duration").alias("avg_session_time")
    )
    # Segment labels are user-facing Chinese strings — do not translate.
    user_segments = engagement_metrics.withColumn("engagement_level",
        when(col("total_activities") >= 1000, "高活跃用户")
        .when(col("total_activities") >= 500, "中活跃用户")
        .when(col("total_activities") >= 100, "低活跃用户")
        .otherwise("潜在用户")
    )
    activity_patterns = user_activities.withColumn("activity_hour", hour(col("activity_time"))).withColumn("activity_weekday", dayofweek(col("activity_time"))).groupBy("activity_hour", "activity_weekday").agg(count("activity_id").alias("activity_frequency"))
    user_retention = user_activities.withColumn("activity_month", date_format(col("activity_time"), "yyyy-MM")).groupBy("user_id", "activity_month").agg(count("activity_id").alias("monthly_activities"))
    retention_cohorts = user_retention.groupBy("activity_month").agg(
        countDistinct("user_id").alias("active_users"),
        avg("monthly_activities").alias("avg_monthly_engagement")
    )
    # BUG FIX: total_ratings / total_reviews only exist on
    # engagement_metrics (created by the agg above), not on the raw
    # user_activities rows — the original join against user_activities
    # would fail with an unresolved-column error. Join the aggregated
    # metrics with the profiles instead.
    content_preferences = engagement_metrics.join(user_profiles, "user_id").groupBy("age_group", "gender").agg(
        avg("total_ratings").alias("avg_ratings_by_demo"),
        avg("total_reviews").alias("avg_reviews_by_demo")
    )
    # (Removed dead code: engagement_correlation was computed here but
    # never included in the response.)
    # Users inactive for more than 30 days are flagged as churn risks.
    churn_prediction = user_activities.withColumn("days_since_last_activity", datediff(current_date(), col("last_activity_date"))).filter(col("days_since_last_activity") > 30).groupBy("user_id").agg(max("days_since_last_activity").alias("inactive_days"))
    engagement_trends = user_activities.withColumn("trend_week", date_format(col("activity_time"), "yyyy-ww")).groupBy("trend_week").agg(
        countDistinct("user_id").alias("weekly_active_users"),
        count("activity_id").alias("weekly_activities")
    ).orderBy("trend_week")
    result_data = {
        "user_segments": user_segments.toPandas().to_dict('records'),
        "activity_patterns": activity_patterns.toPandas().to_dict('records'),
        "retention_analysis": retention_cohorts.toPandas().to_dict('records'),
        "demographic_preferences": content_preferences.toPandas().to_dict('records'),
        "engagement_trends": engagement_trends.toPandas().to_dict('records'),
        "churn_risks": churn_prediction.toPandas().to_dict('records')
    }
    # Free the cached activity log once all actions have materialized.
    user_activities.unpersist()
    return JsonResponse(result_data, safe=False)



六、部分文档展示

在这里插入图片描述

七、END

💕💕文末获取源码联系计算机编程果茶熊