前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
大数据框架:Hadoop+Spark(本次没用Hive,支持定制) 开发语言:Python+Java(两个版本都支持) 后端框架:Django+Spring Boot(Spring+SpringMVC+Mybatis)(两个版本都支持) 前端:Vue+ElementUI+Echarts+HTML+CSS+JavaScript+jQuery 详细技术点:Hadoop、HDFS、Spark、Spark SQL、Pandas、NumPy 数据库:MySQL
二.系统内容简介
本NBA美国职业篮球联赛数据分析可视化系统是基于大数据技术栈构建的专业体育数据分析平台,采用Hadoop分布式存储架构和Spark大数据处理引擎作为核心技术支撑。系统通过Django和Spring Boot双重后端架构设计,结合MySQL关系型数据库进行数据持久化管理,前端采用Vue框架配合ElementUI组件库和Echarts图表库实现交互式数据可视化展示。系统集成了球员表现分析、比赛阶段分析、发展趋势分析、团队实力分析和数据关系分析五大核心功能模块,通过Spark SQL进行高效的数据查询和统计分析,结合Pandas和NumPy进行数据预处理和数学计算。系统能够处理海量的NBA赛事数据,为用户提供多维度的数据洞察和可视化报表,支持实时数据更新和历史数据回溯分析,帮助用户深入理解篮球比赛的数据规律和球员团队表现特征。
三.系统功能演示
四.系统界面展示
五.系统源码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views import View
import json
# Shared Spark session for every analysis view below. Adaptive Query
# Execution is enabled so Spark can coalesce shuffle partitions at runtime.
spark = (
    SparkSession.builder
    .appName("NBADataAnalysis")
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
    .getOrCreate()
)
class PlayerPerformanceAnalysis(View):
    """Per-player, per-season performance analytics.

    POST body: JSON with ``player_id`` and ``season``.
    Returns season averages, efficiency extremes and consistency, the
    game-by-game trend, recent form (last 10 games), league-average
    comparison, and the player's league scoring percentile.
    """

    def post(self, request):
        data = json.loads(request.body)
        player_id = data.get('player_id')
        season = data.get('season')
        # NOTE(review): JDBC credentials are hard-coded; move them to Django
        # settings / environment variables before production use.
        df = (spark.read.format("jdbc")
              .option("url", "jdbc:mysql://localhost:3306/nba_db")
              .option("dbtable", "player_stats")
              .option("user", "root")
              .option("password", "password")
              .load())
        player_df = df.filter((col("player_id") == player_id) & (col("season") == season))
        total_games = player_df.count()
        if total_games == 0:
            # Guard: the original crashed with IndexError on collect()[0]
            # when the player/season combination had no rows.
            return JsonResponse({"error": "no data for player/season"}, status=404)
        avg_stats = player_df.agg(
            avg("points").alias("avg_points"),
            avg("rebounds").alias("avg_rebounds"),
            avg("assists").alias("avg_assists"),
            avg("steals").alias("avg_steals"),
            avg("blocks").alias("avg_blocks")).collect()[0]
        # Simple efficiency: positive box-score stats minus negatives.
        efficiency_rating = player_df.withColumn(
            "efficiency",
            (col("points") + col("rebounds") + col("assists")
             + col("steals") + col("blocks"))
            - (col("turnovers") + col("missed_shots")))
        # One aggregation pass instead of three separate Spark jobs.
        eff_row = efficiency_rating.agg(
            max("efficiency").alias("max_eff"),
            min("efficiency").alias("min_eff"),
            stddev("efficiency").alias("std_eff")).collect()[0]
        max_efficiency = eff_row["max_eff"]
        min_efficiency = eff_row["min_eff"]
        # stddev is None for a single game; serialized as null, as before.
        consistency_score = eff_row["std_eff"]
        performance_trend = player_df.select(
            "game_date", "points", "rebounds", "assists").orderBy("game_date").collect()
        recent_form = (player_df.orderBy(desc("game_date")).limit(10)
                       .agg(avg("points").alias("recent_avg_points"))
                       .collect()[0]["recent_avg_points"])
        comparison_data = df.filter(col("season") == season).agg(
            avg("points").alias("league_avg_points"),
            avg("rebounds").alias("league_avg_rebounds"),
            avg("assists").alias("league_avg_assists")).collect()[0]
        season_points = [row["points"] for row in
                         df.filter(col("season") == season).select("points").collect()]
        total_players = len(season_points)
        avg_points = avg_stats["avg_points"]
        # BUGFIX: the original did sorted(...).index(avg_points) + 1, which
        # raises ValueError because the player's *average* is almost never
        # equal to any single-game value in the list. Rank by counting
        # strictly better single-game scores instead. (Builtins sum/max/min
        # are shadowed by the pyspark wildcard import, so len() on a filtered
        # list is used for the count.) Also guard an empty league list.
        if total_players > 0 and avg_points is not None:
            player_points_rank = len(
                [p for p in season_points if p is not None and p > avg_points]) + 1
            percentile_rank = (total_players - player_points_rank) / total_players * 100
        else:
            percentile_rank = 0
        result = {
            "avg_stats": dict(avg_stats.asDict()),
            "total_games": total_games,
            "max_efficiency": max_efficiency,
            "min_efficiency": min_efficiency,
            "consistency_score": consistency_score,
            "performance_trend": [
                {"date": row["game_date"], "points": row["points"],
                 "rebounds": row["rebounds"], "assists": row["assists"]}
                for row in performance_trend],
            "recent_form": recent_form,
            "league_comparison": dict(comparison_data.asDict()),
            "percentile_rank": percentile_rank,
        }
        return JsonResponse(result)
class GameStageAnalysis(View):
    """Team game-stage analytics for one season.

    POST body: JSON with ``team_id`` and ``season``.
    Returns quarter scoring averages, strongest/weakest quarter,
    comeback/blowout/close-game counts, clutch rating, fourth-quarter wins,
    and average largest lead/deficit per game.
    """

    def post(self, request):
        data = json.loads(request.body)
        team_id = data.get('team_id')
        season = data.get('season')
        # NOTE(review): JDBC credentials are hard-coded; move to settings/env.
        games_df = (spark.read.format("jdbc")
                    .option("url", "jdbc:mysql://localhost:3306/nba_db")
                    .option("dbtable", "game_stats")
                    .option("user", "root")
                    .option("password", "password")
                    .load())
        team_games = (games_df
                      .filter((col("home_team_id") == team_id)
                              | (col("away_team_id") == team_id))
                      .filter(col("season") == season))
        quarter_performance = team_games.select(
            "game_id", "q1_score", "q2_score", "q3_score", "q4_score",
            "overtime_score",
            when(col("home_team_id") == team_id, col("home_team_id"))
            .otherwise(col("away_team_id")).alias("team"))
        # One aggregation pass instead of four separate collect() round trips.
        q_row = quarter_performance.agg(
            avg("q1_score").alias("avg_q1"),
            avg("q2_score").alias("avg_q2"),
            avg("q3_score").alias("avg_q3"),
            avg("q4_score").alias("avg_q4")).collect()[0]
        q1_avg = q_row["avg_q1"]
        q2_avg = q_row["avg_q2"]
        q3_avg = q_row["avg_q3"]
        q4_avg = q_row["avg_q4"]
        quarters = [("Q1", q1_avg), ("Q2", q2_avg), ("Q3", q3_avg), ("Q4", q4_avg)]
        # BUGFIX: the builtins max/min are shadowed by
        # `from pyspark.sql.functions import *`, so max(list, key=...) raised
        # TypeError at runtime. Rank with sorted() (not shadowed); a None
        # average (empty season) sorts lowest instead of crashing.
        ranked = sorted(quarters,
                        key=lambda q: q[1] if q[1] is not None else float("-inf"))
        strongest_quarter = ranked[-1]
        weakest_quarter = ranked[0]
        comeback_games = team_games.filter(
            (col("halftime_deficit") > 0) & (col("final_result") == "W")).count()
        # abs() here resolves to pyspark's column abs from the wildcard import.
        blowout_games = team_games.filter(abs(col("final_margin")) > 20).count()
        close_games = team_games.filter(abs(col("final_margin")) <= 5).count()
        close_game_record = team_games.filter(
            (abs(col("final_margin")) <= 5) & (col("final_result") == "W")).count()
        fourth_quarter_performance = team_games.select(
            "q4_score", "opponent_q4_score", "final_result")
        fourth_quarter_wins = fourth_quarter_performance.filter(
            (col("q4_score") > col("opponent_q4_score"))
            & (col("final_result") == "W")).count()
        # Win rate in games decided by 5 points or fewer.
        clutch_rating = (close_game_record / close_games * 100) if close_games > 0 else 0
        momentum_shifts = team_games.select(
            "game_id", "largest_lead", "largest_deficit").collect()
        # BUGFIX: builtin sum is shadowed by the pyspark wildcard import, and
        # the original divided by len() without guarding an empty season.
        if momentum_shifts:
            lead_total = 0
            deficit_total = 0
            for row in momentum_shifts:
                lead_total += row["largest_lead"]
                deficit_total += row["largest_deficit"]
            avg_largest_lead = lead_total / len(momentum_shifts)
            avg_largest_deficit = deficit_total / len(momentum_shifts)
        else:
            avg_largest_lead = 0
            avg_largest_deficit = 0
        result = {
            "quarter_averages": {"Q1": q1_avg, "Q2": q2_avg, "Q3": q3_avg, "Q4": q4_avg},
            "strongest_quarter": strongest_quarter,
            "weakest_quarter": weakest_quarter,
            "comeback_games": comeback_games,
            "blowout_games": blowout_games,
            "close_games": close_games,
            "close_game_record": close_game_record,
            "clutch_rating": clutch_rating,
            "fourth_quarter_wins": fourth_quarter_wins,
            "avg_largest_lead": avg_largest_lead,
            "avg_largest_deficit": avg_largest_deficit,
        }
        return JsonResponse(result)
class DevelopmentTrendAnalysis(View):
    """Career development trend for one player across a season range.

    POST body: JSON with ``player_id``, ``start_season`` and ``end_season``.
    Returns per-season progression, first-to-last growth rates, peak and
    breakout seasons, a linear decline indicator, consistency scores,
    injury-shortened seasons, and first-reached career milestones.
    """

    def post(self, request):
        data = json.loads(request.body)
        player_id = data.get('player_id')
        start_season = data.get('start_season')
        end_season = data.get('end_season')
        # NOTE(review): JDBC credentials are hard-coded; move to settings/env.
        career_df = (spark.read.format("jdbc")
                     .option("url", "jdbc:mysql://localhost:3306/nba_db")
                     .option("dbtable", "career_stats")
                     .option("user", "root")
                     .option("password", "password")
                     .load())
        player_career = (career_df
                         .filter((col("player_id") == player_id)
                                 & (col("season") >= start_season)
                                 & (col("season") <= end_season))
                         .orderBy("season"))
        seasonal_stats = player_career.select(
            "season", "points_per_game", "rebounds_per_game", "assists_per_game",
            "field_goal_percentage", "three_point_percentage",
            "minutes_played").collect()
        if not seasonal_stats:
            # Guard: the original crashed with IndexError on the peak_season
            # collect()[0] when the requested range contained no seasons.
            return JsonResponse({"error": "no data for player/season range"}, status=404)
        points_trend = [row["points_per_game"] for row in seasonal_stats]
        rebounds_trend = [row["rebounds_per_game"] for row in seasonal_stats]
        assists_trend = [row["assists_per_game"] for row in seasonal_stats]

        def _growth_rate(trend):
            # Percent change from first to last season. BUGFIX: guarded
            # against a zero (or None) first value, which made the original
            # raise ZeroDivisionError/TypeError.
            if len(trend) > 1 and trend[0]:
                return (trend[-1] - trend[0]) / trend[0] * 100
            return 0

        points_growth_rate = _growth_rate(points_trend)
        rebounds_growth_rate = _growth_rate(rebounds_trend)
        assists_growth_rate = _growth_rate(assists_trend)
        peak_season = (player_career.orderBy(desc("player_efficiency_rating"))
                       .limit(1)
                       .select("season", "player_efficiency_rating")
                       .collect()[0])
        # Slope of a linear fit over per-season scoring; negative => decline.
        # NOTE(review): assumes points_per_game has no NULLs — np.polyfit
        # fails on None values; verify against the schema.
        decline_indicator = (float(np.polyfit(range(len(points_trend)), points_trend, 1)[0])
                             if len(points_trend) >= 2 else 0)
        consistency_metrics = player_career.agg(
            stddev("points_per_game").alias("points_std"),
            stddev("rebounds_per_game").alias("rebounds_std"),
            stddev("assists_per_game").alias("assists_std")).collect()[0]
        # Seasons with fewer than 70 games played are treated as
        # injury-impacted (out of the 82-game schedule).
        injury_impact_seasons = player_career.filter(
            col("games_played") < 70).select("season", "games_played").collect()
        # Breakout season: first season whose scoring jumps by more than
        # 5 points per game over the previous season.
        breakout_season = None
        for i in range(1, len(points_trend)):
            if points_trend[i] - points_trend[i - 1] > 5:
                breakout_season = seasonal_stats[i]["season"]
                break
        career_milestones = player_career.select(
            "season", "total_points", "total_rebounds", "total_assists").collect()
        # BUGFIX: the original's membership test checked the *season*, not the
        # milestone, so "10K Points" was re-appended for every season after
        # the threshold was crossed, and a second milestone reached in the
        # same season was wrongly suppressed. Record only the first season
        # each milestone is reached.
        milestone_seasons = []
        reached = set()
        for row in career_milestones:
            if row["total_points"] >= 10000 and "10K Points" not in reached:
                reached.add("10K Points")
                milestone_seasons.append(
                    {"season": row["season"], "milestone": "10K Points"})
            if row["total_rebounds"] >= 5000 and "5K Rebounds" not in reached:
                reached.add("5K Rebounds")
                milestone_seasons.append(
                    {"season": row["season"], "milestone": "5K Rebounds"})
        result = {
            "seasonal_progression": [
                {"season": row["season"], "points": row["points_per_game"],
                 "rebounds": row["rebounds_per_game"],
                 "assists": row["assists_per_game"],
                 "fg_pct": row["field_goal_percentage"],
                 "three_pct": row["three_point_percentage"]}
                for row in seasonal_stats],
            "growth_rates": {"points": points_growth_rate,
                             "rebounds": rebounds_growth_rate,
                             "assists": assists_growth_rate},
            "peak_season": {"season": peak_season["season"],
                            "rating": peak_season["player_efficiency_rating"]},
            "decline_indicator": decline_indicator,
            "consistency_scores": dict(consistency_metrics.asDict()),
            "injury_seasons": [
                {"season": row["season"], "games_played": row["games_played"]}
                for row in injury_impact_seasons],
            "breakout_season": breakout_season,
            "career_milestones": milestone_seasons,
        }
        return JsonResponse(result)
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨