【Big Data】Wine Quality Data Visualization Analysis System | Computer Science Graduation Project | Hadoop+Spark Environment Setup | Data Science and Big Data Technology | Source Code + Documentation + Walkthrough Included


1. About the Author

💖💖Author: 计算机编程果茶熊 💙💙About me: I spent many years in computer-science training and teaching, worked as a programming instructor, and still enjoy teaching. I work across several IT areas including Java, WeChat mini-programs, Python, Golang, and Android. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know a few techniques for lowering plagiarism-check similarity. I like sharing solutions to problems I run into during development and exchanging ideas about technology, so feel free to ask me about code and technical issues! 💛💛A word of thanks: thank you all for your attention and support! 💜💜 Website practice projects · Android/mini-program practice projects · Big data practice projects · Graduation project topic selection 💕💕Contact 计算机编程果茶熊 at the end of the article to get the source code

2. System Overview

Big data framework: Hadoop + Spark (Hive requires custom modification); Development languages: Java + Python (both versions supported); Database: MySQL; Backend frameworks: SpringBoot (Spring + SpringMVC + MyBatis) + Django (both versions supported); Frontend: Vue + Echarts + HTML + CSS + JavaScript + jQuery
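
The wine records are stored in MySQL, so the Spark layer has to pull them in before any analysis runs. Below is a minimal sketch of one way to do that over JDBC; the database name wine_db, table wine_quality, credentials, and driver class are placeholder assumptions, and the MySQL Connector/J JAR must be on the Spark classpath.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("WineQualityLoad").getOrCreate()

# Read the wine records from MySQL over JDBC (connection details are placeholders)
wine_df = (
    spark.read.format("jdbc")
    .option("url", "jdbc:mysql://localhost:3306/wine_db?useSSL=false&serverTimezone=UTC")
    .option("dbtable", "wine_quality")
    .option("user", "root")
    .option("password", "your_password")
    .option("driver", "com.mysql.cj.jdbc.Driver")
    .load()
)

# Plain Python records that the analysis functions shown later can rebuild into a Spark DataFrame
wine_data = [row.asDict() for row in wine_df.collect()]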

The Wine Quality Data Visualization Analysis System is an intelligent analysis platform built on big data technology. It uses the Hadoop + Spark distributed computing framework as its core processing engine and combines a Django backend with a Vue + ElementUI + Echarts frontend stack to deliver comprehensive analysis and visualization of wine quality data. Using Spark SQL together with data-processing tools such as Pandas and NumPy, the system mines key physico-chemical indicators of wine, including alcohol content, acidity, and pH, and provides multi-dimensional analyses such as alcohol-acidity quality analysis, indicator correlation matrix analysis, K-Means clustering, and outlier detection. The platform stores its data in MySQL, applies machine learning algorithms for clustering and predictive analysis, and presents results through intuitive charts and a visualization dashboard, giving wineries and research institutions a scientific basis for quality assessment and decision support and helping drive the digital transformation and quality improvement of the wine industry.
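
To show how the layers fit together, here is a minimal sketch of a Django view that exposes one of the Spark analyses as a JSON API for the Vue + Echarts charts to consume. The view name, URL, and request format are illustrative assumptions rather than the project's actual routing; alcohol_quality_analysis is the function shown in the code samples below.

from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json

@csrf_exempt
def alcohol_quality_view(request):
    # Parse the wine records posted by the frontend and run the Spark analysis on them
    wine_data = json.loads(request.body).get("wine_data", [])
    results = alcohol_quality_analysis(wine_data)
    return JsonResponse(results, safe=False)

# urls.py (illustrative): path("api/alcohol-quality/", alcohol_quality_view)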

3. Video Walkthrough

Wine Quality Data Visualization Analysis System

4. Feature Screenshots


5. Code Samples


from pyspark.sql import SparkSession
from pyspark.sql.functions import col, avg, stddev, corr, when, abs as spark_abs
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.stat import Correlation
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json

# Shared SparkSession with adaptive query execution enabled
spark = SparkSession.builder \
    .appName("WineQualityAnalysis") \
    .config("spark.sql.adaptive.enabled", "true") \
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
    .getOrCreate()

# Per-quality statistics, alcohol/acidity correlations, and alcohol-level segmentation
def alcohol_quality_analysis(wine_data):
    df = spark.createDataFrame(wine_data)
    quality_stats = df.groupBy("quality").agg(
        avg("alcohol").alias("avg_alcohol"),
        stddev("alcohol").alias("std_alcohol"),
        avg("fixed_acidity").alias("avg_fixed_acidity"),
        avg("volatile_acidity").alias("avg_volatile_acidity"),
        avg("citric_acid").alias("avg_citric_acid"),
        avg("pH").alias("avg_ph")
    ).orderBy("quality")
    correlation_alcohol_quality = df.stat.corr("alcohol", "quality")
    correlation_acidity_quality = df.stat.corr("fixed_acidity", "quality")
    high_quality_threshold = df.approxQuantile("quality", [0.75], 0.01)[0]
    # Wines at or above the 75th percentile of quality scores
    high_quality_wines = df.filter(col("quality") >= high_quality_threshold)
    high_quality_count = high_quality_wines.count()
    alcohol_quality_segments = df.withColumn("alcohol_level", 
        when(col("alcohol") < 10, "Low")
        .when((col("alcohol") >= 10) & (col("alcohol") < 12), "Medium")
        .otherwise("High")
    ).groupBy("alcohol_level", "quality").count()
    quality_distribution = df.groupBy("quality").count().orderBy("quality")
    alcohol_ph_interaction = df.withColumn("alcohol_ph_ratio", col("alcohol") / col("pH"))
    interaction_quality_corr = alcohol_ph_interaction.stat.corr("alcohol_ph_ratio", "quality")
    result_stats = quality_stats.collect()
    result_segments = alcohol_quality_segments.collect()
    result_distribution = quality_distribution.collect()
    analysis_results = {
        "quality_statistics": [row.asDict() for row in result_stats],
        "alcohol_quality_correlation": correlation_alcohol_quality,
        "acidity_quality_correlation": correlation_acidity_quality,
        "alcohol_quality_segments": [row.asDict() for row in result_segments],
        "quality_distribution": [row.asDict() for row in result_distribution],
        "alcohol_ph_interaction_correlation": interaction_quality_corr,
        "high_quality_threshold": high_quality_threshold,
        "high_quality_count": high_quality_count
    }
    return analysis_results

# K-Means clustering of wines on standardized physico-chemical features
def kmeans_clustering_analysis(wine_data):
    df = spark.createDataFrame(wine_data)
    feature_columns = ["alcohol", "fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates"]
    assembler = VectorAssembler(inputCols=feature_columns, outputCol="features")
    feature_df = assembler.transform(df)
    scaler = StandardScaler(inputCol="features", outputCol="scaled_features", withStd=True, withMean=True)
    scaler_model = scaler.fit(feature_df)
    scaled_df = scaler_model.transform(feature_df)
    optimal_k = 3
    kmeans = KMeans(k=optimal_k, seed=42, featuresCol="scaled_features", predictionCol="cluster")
    kmeans_model = kmeans.fit(scaled_df)
    clustered_df = kmeans_model.transform(scaled_df)
    cluster_stats = clustered_df.groupBy("cluster").agg(
        avg("alcohol").alias("avg_alcohol"),
        avg("fixed_acidity").alias("avg_fixed_acidity"),
        avg("volatile_acidity").alias("avg_volatile_acidity"),
        avg("pH").alias("avg_ph"),
        avg("quality").alias("avg_quality")
    ).orderBy("cluster")
    cluster_counts = clustered_df.groupBy("cluster").count().orderBy("cluster")
    quality_cluster_distribution = clustered_df.groupBy("cluster", "quality").count().orderBy("cluster", "quality")
    cluster_centers = kmeans_model.clusterCenters()
    # Mean distance from each cluster's points to its centroid (a compactness measure,
    # not a true silhouette coefficient)
    intra_cluster_distances = []
    for i in range(optimal_k):
        cluster_data = clustered_df.filter(col("cluster") == i)
        if cluster_data.count() > 1:
            cluster_features = cluster_data.select("scaled_features").rdd.map(lambda row: row[0].toArray()).collect()
            cluster_center = cluster_centers[i]
            intra_cluster_distance = np.mean([np.linalg.norm(features - cluster_center) for features in cluster_features])
            intra_cluster_distances.append(intra_cluster_distance)
    result_stats = cluster_stats.collect()
    result_counts = cluster_counts.collect()
    result_distribution = quality_cluster_distribution.collect()
    clustering_results = {
        "cluster_statistics": [row.asDict() for row in result_stats],
        "cluster_counts": [row.asDict() for row in result_counts],
        "quality_cluster_distribution": [row.asDict() for row in result_distribution],
        "cluster_centers": [center.tolist() for center in cluster_centers],
        "silhouette_scores": silhouette_samples,
        "optimal_clusters": optimal_k
    }
    return clustering_results
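
# Sketch (assumption, not part of the original snippet): the intra-cluster distances kept in
# clustering_results only measure cluster compactness; a true silhouette score for the
# clustering above can be obtained with Spark ML's built-in ClusteringEvaluator.
def evaluate_silhouette(clustered_df):
    from pyspark.ml.evaluation import ClusteringEvaluator
    evaluator = ClusteringEvaluator(featuresCol="scaled_features", predictionCol="cluster",
                                    metricName="silhouette", distanceMeasure="squaredEuclidean")
    return evaluator.evaluate(clustered_df)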

# Pearson correlation matrix across all indicators, with a quality-correlation ranking
def correlation_matrix_analysis(wine_data):
    df = spark.createDataFrame(wine_data)
    numeric_columns = ["alcohol", "fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates", "quality"]
    assembler = VectorAssembler(inputCols=numeric_columns, outputCol="features")
    feature_df = assembler.transform(df)
    correlation_matrix = Correlation.corr(feature_df, "features", "pearson").collect()[0][0]
    correlation_array = correlation_matrix.toArray()
    correlation_results = {}
    for i, col1 in enumerate(numeric_columns):
        correlation_results[col1] = {}
        for j, col2 in enumerate(numeric_columns):
            correlation_results[col1][col2] = float(correlation_array[i][j])
    strong_correlations = []
    for i, col1 in enumerate(numeric_columns):
        for j, col2 in enumerate(numeric_columns):
            if i < j and abs(correlation_array[i][j]) > 0.5:
                strong_correlations.append({
                    "variable1": col1,
                    "variable2": col2,
                    "correlation": float(correlation_array[i][j]),
                    "strength": "Strong" if abs(correlation_array[i][j]) > 0.7 else "Moderate"
                })
    quality_correlations = []
    quality_index = numeric_columns.index("quality")
    for i, column in enumerate(numeric_columns):
        if column != "quality":
            corr_value = correlation_array[i][quality_index]
            quality_correlations.append({
                "feature": column,
                "correlation_with_quality": float(corr_value),
                "absolute_correlation": abs(float(corr_value))
            })
    quality_correlations.sort(key=lambda x: x["absolute_correlation"], reverse=True)
    multicollinearity_pairs = []
    for i, col1 in enumerate(numeric_columns):
        for j, col2 in enumerate(numeric_columns):
            if i < j and abs(correlation_array[i][j]) > 0.8:
                multicollinearity_pairs.append({
                    "variable1": col1,
                    "variable2": col2,
                    "correlation": float(correlation_array[i][j])
                })
    analysis_results = {
        "correlation_matrix": correlation_results,
        "strong_correlations": strong_correlations,
        "quality_correlations": quality_correlations,
        "multicollinearity_pairs": multicollinearity_pairs,
        "feature_importance_ranking": [item["feature"] for item in quality_correlations[:5]]
    }
    return analysis_results
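
# Sketch (assumption): the outlier detection feature mentioned in the system overview is not
# shown above; an IQR-based rule in Spark could look like the following. The column name
# "alcohol", the 1.5*IQR factor, and the 100-row cap are illustrative choices, not project code.
def iqr_outlier_detection(wine_data, column="alcohol"):
    df = spark.createDataFrame(wine_data)
    q1, q3 = df.approxQuantile(column, [0.25, 0.75], 0.01)
    iqr = q3 - q1
    lower_bound, upper_bound = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    outliers = df.filter((col(column) < lower_bound) | (col(column) > upper_bound))
    return {
        "column": column,
        "lower_bound": lower_bound,
        "upper_bound": upper_bound,
        "outlier_count": outliers.count(),
        "outlier_samples": [row.asDict() for row in outliers.limit(100).collect()]
    }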

6. Documentation Samples


7. END

💕💕Contact 计算机编程果茶熊 to get the source code