💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐
基于大数据的汽车各品牌投诉数据分析系统介绍
汽车品牌投诉数据分析系统是一款基于大数据技术栈构建的综合性数据分析平台,专门针对汽车行业的消费者投诉信息进行深度挖掘和可视化展示。系统采用Hadoop+Spark作为核心大数据处理框架,通过HDFS分布式文件系统存储海量投诉数据,利用Spark SQL进行高效的数据查询和分析处理。技术架构上支持Python+Django和Java+Spring Boot两套后端解决方案,前端采用Vue+ElementUI+Echarts技术栈构建用户交互界面。系统核心功能模块包括品牌分析、车型分析、问题分析、文本挖掘分析等,能够从多个维度对汽车投诉数据进行统计分析,通过数据大屏实时展示分析结果,为汽车行业的质量监控、品牌管理和消费者权益保护提供数据支撑。整个系统融合了大数据处理、数据挖掘、可视化分析等多项技术,展现了现代数据科学在传统行业中的应用价值。
基于大数据的汽车各品牌投诉数据分析系统演示视频
基于大数据的汽车各品牌投诉数据分析系统演示图片
基于大数据的汽车各品牌投诉数据分析系统代码展示
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum as spark_sum, avg, desc, when
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
from pyspark.ml.feature import Tokenizer, StopWordsRemover, CountVectorizer
from pyspark.ml.clustering import LDA
import pandas as pd
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
# Module-level SparkSession shared by every analysis view below.
# Adaptive query execution (AQE) and partition coalescing are enabled so Spark
# can tune shuffle partition counts at runtime for the aggregation-heavy queries.
spark = SparkSession.builder.appName("CarComplaintAnalysis").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()
@csrf_exempt
def brand_analysis(request):
    """Return per-brand complaint statistics as JSON.

    Computes, from the ``complaint_info`` table: total complaint count,
    average/high severity, resolution rate, monthly trend, and
    problem-category distribution for the top 20 brands by complaint count.

    Returns:
        JsonResponse with keys ``brand_analysis``, ``trend_data`` and
        ``problem_distribution``.
    """
    # NOTE(review): DB credentials are hard-coded; move them to Django
    # settings / environment variables before deployment.
    complaint_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/car_complaint")
        .option("dbtable", "complaint_info")
        .option("user", "root")
        .option("password", "123456")
        .load()
    )
    brand_complaint_count = (
        complaint_df.groupBy("brand_name")
        .agg(count("*").alias("complaint_count"))
        .orderBy(desc("complaint_count"))
    )
    brand_severity_analysis = complaint_df.groupBy("brand_name").agg(
        avg("severity_score").alias("avg_severity"),
        count(when(col("severity_score") >= 8, True)).alias("high_severity_count"),
    )
    brand_trend_analysis = (
        complaint_df.groupBy("brand_name", "complaint_month")
        .agg(count("*").alias("monthly_count"))
        .orderBy("brand_name", "complaint_month")
    )
    brand_problem_distribution = (
        complaint_df.groupBy("brand_name", "problem_category")
        .agg(count("*").alias("category_count"))
        .orderBy("brand_name", desc("category_count"))
    )
    brand_resolution_rate = complaint_df.groupBy("brand_name").agg(
        (spark_sum(when(col("resolution_status") == "resolved", 1).otherwise(0))
         / count("*") * 100).alias("resolution_rate")
    )
    final_brand_analysis = (
        brand_complaint_count
        .join(brand_severity_analysis, "brand_name")
        .join(brand_resolution_rate, "brand_name")
    )
    # BUG FIX: a join does not preserve the sort order of its inputs, so
    # limit(20) on the joined frame returned 20 arbitrary brands. Re-sort
    # explicitly so we really get the top 20 by complaint count.
    top_brands = (
        final_brand_analysis.orderBy(desc("complaint_count")).limit(20).collect()
    )
    brand_data = []
    for row in top_brands:
        brand_data.append({
            "brand_name": row.brand_name,
            "complaint_count": row.complaint_count,
            # avg() is NULL when every severity_score for a brand is NULL;
            # round(None, 2) would raise TypeError.
            "avg_severity": round(row.avg_severity, 2) if row.avg_severity is not None else None,
            "high_severity_count": row.high_severity_count,
            "resolution_rate": round(row.resolution_rate, 2),
        })
    top_brand_names = [brand["brand_name"] for brand in brand_data[:10]]
    trend_data = brand_trend_analysis.filter(col("brand_name").isin(top_brand_names)).collect()
    problem_data = brand_problem_distribution.filter(col("brand_name").isin(top_brand_names)).collect()
    return JsonResponse({
        "brand_analysis": brand_data,
        "trend_data": [
            {"brand": row.brand_name, "month": row.complaint_month, "count": row.monthly_count}
            for row in trend_data
        ],
        "problem_distribution": [
            {"brand": row.brand_name, "category": row.problem_category, "count": row.category_count}
            for row in problem_data
        ],
    })
@csrf_exempt
def model_analysis(request):
    """Return per-model complaint statistics as JSON.

    Joins ``complaint_info`` with ``model_price`` and reports, for the 30
    models with the highest average severity (>= 50 complaints): issue-type
    counts, price range, vehicle-age distribution and seasonal pattern.

    Returns:
        JsonResponse with keys ``model_analysis``, ``age_distribution`` and
        ``seasonal_pattern``.
    """
    # NOTE(review): DB credentials are hard-coded; move them to Django
    # settings / environment variables before deployment.
    complaint_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/car_complaint")
        .option("dbtable", "complaint_info")
        .option("user", "root")
        .option("password", "123456")
        .load()
    )
    model_complaint_stats = complaint_df.groupBy("brand_name", "model_name").agg(
        count("*").alias("total_complaints"),
        avg("severity_score").alias("avg_severity"),
        spark_sum(when(col("problem_category") == "engine", 1).otherwise(0)).alias("engine_issues"),
        spark_sum(when(col("problem_category") == "transmission", 1).otherwise(0)).alias("transmission_issues"),
        spark_sum(when(col("problem_category") == "electrical", 1).otherwise(0)).alias("electrical_issues"),
    )
    model_age_analysis = (
        complaint_df.groupBy("brand_name", "model_name", "vehicle_age_group")
        .agg(count("*").alias("age_group_complaints"))
        .orderBy("brand_name", "model_name", "vehicle_age_group")
    )
    price_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/car_complaint")
        .option("dbtable", "model_price")
        .option("user", "root")
        .option("password", "123456")
        .load()
    )
    model_price_impact = complaint_df.join(price_df, ["brand_name", "model_name"], "left")
    model_value_analysis = model_price_impact.groupBy("brand_name", "model_name", "price_range").agg(
        count("*").alias("complaints_in_range"),
        avg("severity_score").alias("avg_severity_by_price"),
    )
    model_seasonal_pattern = (
        complaint_df.groupBy("brand_name", "model_name", "complaint_season")
        .agg(count("*").alias("seasonal_complaints"))
        .orderBy("brand_name", "model_name", "complaint_season")
    )
    model_geographic_distribution = (
        complaint_df.groupBy("brand_name", "model_name", "complaint_region")
        .agg(count("*").alias("regional_complaints"))
        .orderBy("brand_name", "model_name", desc("regional_complaints"))
    )
    comprehensive_model_analysis = model_complaint_stats.join(
        model_value_analysis, ["brand_name", "model_name"], "left"
    )
    top_problematic_models = (
        comprehensive_model_analysis.filter(col("total_complaints") >= 50)
        .orderBy(desc("avg_severity"))
        .limit(30)
        .collect()
    )
    model_data = []
    for row in top_problematic_models:
        model_data.append({
            "brand_name": row.brand_name,
            "model_name": row.model_name,
            "total_complaints": row.total_complaints,
            # avg() is NULL when every severity_score is NULL; guard round().
            "avg_severity": round(row.avg_severity, 2) if row.avg_severity is not None else None,
            "engine_issues": row.engine_issues,
            "transmission_issues": row.transmission_issues,
            "electrical_issues": row.electrical_issues,
            # BUG FIX: after the left join the Row always HAS a price_range
            # field, so hasattr(row, 'price_range') was always True and NULL
            # prices leaked through as None; check the value instead.
            "price_range": row.price_range if row.price_range is not None else "Unknown",
        })
    top_model_names = [model["model_name"] for model in model_data[:15]]
    age_analysis_data = model_age_analysis.filter(col("model_name").isin(top_model_names)).collect()
    seasonal_data = model_seasonal_pattern.filter(col("model_name").isin(top_model_names)).collect()
    return JsonResponse({
        "model_analysis": model_data,
        "age_distribution": [
            {"brand": row.brand_name, "model": row.model_name,
             "age_group": row.vehicle_age_group, "complaints": row.age_group_complaints}
            for row in age_analysis_data
        ],
        "seasonal_pattern": [
            {"brand": row.brand_name, "model": row.model_name,
             "season": row.complaint_season, "complaints": row.seasonal_complaints}
            for row in seasonal_data
        ],
    })
@csrf_exempt
def text_mining_analysis(request):
    """Mine complaint descriptions: LDA topics, keyword/brand frequencies,
    and crude keyword-based sentiment rates per problem category.

    Returns:
        JsonResponse with keys ``topics``, ``keyword_analysis`` and
        ``sentiment_patterns``.
    """
    complaint_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/car_complaint")
        .option("dbtable", "complaint_info")
        .option("user", "root")
        .option("password", "123456")
        .load()
    )
    # Keep only rows that actually have description text to mine.
    corpus = complaint_df.select(
        "complaint_id", "complaint_description", "brand_name", "problem_category"
    ).filter(col("complaint_description").isNotNull())
    # Tokenize -> drop stop words -> bag-of-words vectors.
    tokens = Tokenizer(
        inputCol="complaint_description", outputCol="words"
    ).transform(corpus)
    cleaned = StopWordsRemover(
        inputCol="words", outputCol="filtered_words"
    ).transform(tokens)
    vectorizer_model = CountVectorizer(
        inputCol="filtered_words", outputCol="features", vocabSize=1000, minDF=2.0
    ).fit(cleaned)
    vectors = vectorizer_model.transform(cleaned)
    # Fit a 10-topic LDA model; fixed seed keeps results reproducible.
    topic_model = LDA(k=10, maxIter=20, featuresCol="features", seed=42).fit(vectors)
    described = topic_model.describeTopics(maxTermsPerTopic=10)
    vocab = vectorizer_model.vocabulary
    topic_results = [
        {
            "topic_id": entry.topic,
            "words": [vocab[term_idx] for term_idx in entry.termIndices],
            "weights": [float(weight) for weight in entry.termWeights],
        }
        for entry in described.collect()
    ]
    # Count (word, brand) co-occurrences across all filtered tokens.
    word_brand_counts = (
        cleaned.select("filtered_words", "brand_name")
        .rdd.flatMap(lambda r: [((w, r.brand_name), 1) for w in r.filtered_words])
        .reduceByKey(lambda a, b: a + b)
        .map(lambda kv: (kv[0][0], kv[0][1], kv[1]))
        .collect()
    )
    keyword_brand_analysis = {}
    for word, brand, freq in word_brand_counts:
        keyword_brand_analysis.setdefault(word, {})[brand] = freq
    # Naive sentiment proxy: share of descriptions containing marker words.
    sentiment_rows = complaint_df.groupBy("problem_category").agg(
        avg(when(col("complaint_description").contains("terrible"), 1).otherwise(0)).alias("negative_sentiment"),
        avg(when(col("complaint_description").contains("satisfied"), 1).otherwise(0)).alias("positive_sentiment"),
        count("*").alias("total_complaints"),
    ).collect()
    return JsonResponse({
        "topics": topic_results,
        "keyword_analysis": keyword_brand_analysis,
        "sentiment_patterns": [
            {
                "category": r.problem_category,
                "negative_rate": round(r.negative_sentiment, 3),
                "positive_rate": round(r.positive_sentiment, 3),
                "total": r.total_complaints,
            }
            for r in sentiment_rows
        ],
    })
基于大数据的汽车各品牌投诉数据分析系统文档展示
💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐