💖💖 Author: 计算机毕业设计江挽 💙💙 About me: I have worked for a long time as a computer science instructor and genuinely enjoy teaching. I am proficient in Java, WeChat Mini Programs, Python, Golang, Android, and more, and my projects cover big data, deep learning, websites, mini programs, Android apps, and algorithms. I also take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know some techniques for reducing similarity-check scores. I like sharing solutions to problems I run into during development and discussing technology, so feel free to ask me about code and technical issues! 💛💛 A word of thanks: thank you all for your attention and support! 💜💜 Website projects | Android/Mini Program projects | Big data projects | Deep learning projects
Introduction to the Big Data-Based Global Water Usage Data Analysis System
The Global Water Usage Data Analysis System is a comprehensive water-resource analysis platform built on a big data architecture. It uses Hadoop distributed storage and the Spark computing engine as its technical foundation, combined with a Django back end and a Vue front-end stack. Historical water usage data for countries around the world is stored in HDFS, queried and computed at scale with Spark SQL, and mined further with Python data science libraries such as Pandas and NumPy. The platform provides basic modules including a system home page, personal profile, and user management; its core features cover global water usage management, large-screen visualization, multi-dimensional clustering analysis, horizontal comparison across countries, in-depth analysis of key countries, global water usage time series analysis, and water scarcity attribution analysis. The front end is a responsive interface built with Vue and ElementUI, and ECharts renders the analysis results as multi-dimensional visualizations. The overall architecture leverages the strengths of big data technology for processing massive datasets and provides a technical platform for analyzing the state of global water resources.
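Before the analysis views shown later can run their Spark SQL queries, the historical dataset stored on HDFS has to be registered as a queryable view named global_water_usage (the table name used throughout the code section). The following is a minimal sketch of that loading step, assuming a CSV file on HDFS; the NameNode address, file path, and format are illustrative assumptions rather than the project's actual configuration.

from pyspark.sql import SparkSession

# Reuse (or create) the shared SparkSession for the analysis platform.
spark = SparkSession.builder.appName("GlobalWaterAnalysis").getOrCreate()

# Assumed HDFS location and CSV layout -- adjust to the real ingestion pipeline.
water_df = (
    spark.read
    .option("header", "true")
    .option("inferSchema", "true")
    .csv("hdfs://namenode:9000/water_data/global_water_usage.csv")
)

# Expose the data to Spark SQL under the name the analysis views query.
water_df.createOrReplaceTempView("global_water_usage")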
Demo Video of the Big Data-Based Global Water Usage Data Analysis System
Demo Screenshots of the Big Data-Based Global Water Usage Data Analysis System
Code Showcase of the Big Data-Based Global Water Usage Data Analysis System
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import pandas as pd
import numpy as np
import json

# Shared SparkSession with adaptive query execution enabled
spark = (
    SparkSession.builder
    .appName("GlobalWaterAnalysis")
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
    .getOrCreate()
)
@csrf_exempt
def multi_dimensional_clustering_analysis(request):
    if request.method == 'POST':
        data = json.loads(request.body)
        countries = data.get('countries', [])
        years = data.get('years', [])
        # Build the IN (...) filter outside the f-string so the SQL stays readable
        country_filter = ", ".join(f"'{c}'" for c in countries)
        df = spark.sql(
            f"SELECT country_name, year, total_water_usage, agricultural_usage, "
            f"industrial_usage, domestic_usage, water_efficiency_index "
            f"FROM global_water_usage "
            f"WHERE country_name IN ({country_filter}) "
            f"AND year BETWEEN {min(years)} AND {max(years)}"
        )
        feature_cols = ['total_water_usage', 'agricultural_usage', 'industrial_usage',
                        'domestic_usage', 'water_efficiency_index']
        # Assemble the numeric indicators into a single feature vector for KMeans
        assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
        feature_df = assembler.transform(df.fillna(0))
        kmeans = KMeans(featuresCol="features", predictionCol="cluster", k=4, seed=42)
        model = kmeans.fit(feature_df)
        predictions = model.transform(feature_df)
        cluster_results = predictions.select("country_name", "year", "cluster", *feature_cols).collect()
        cluster_centers = model.clusterCenters()
        result_data = []
        for row in cluster_results:
            result_data.append({
                'country': row.country_name,
                'year': row.year,
                'cluster': int(row.cluster),
                'features': {
                    'total_water_usage': float(row.total_water_usage),
                    'agricultural_usage': float(row.agricultural_usage),
                    'industrial_usage': float(row.industrial_usage),
                    'domestic_usage': float(row.domestic_usage),
                    'water_efficiency_index': float(row.water_efficiency_index)
                }
            })
        # Summarize each cluster: its center coordinates and number of member records
        cluster_summary = []
        for i, center in enumerate(cluster_centers):
            cluster_summary.append({
                'cluster_id': i,
                'center_features': center.tolist(),
                'countries_count': len([r for r in result_data if r['cluster'] == i])
            })
        return JsonResponse({
            'status': 'success',
            'clustering_results': result_data,
            'cluster_centers': cluster_summary,
            'total_data_points': len(result_data)
        })
    return JsonResponse({'status': 'error', 'message': 'POST request required'}, status=405)
@csrf_exempt
def multi_country_horizontal_analysis(request):
    if request.method == 'POST':
        data = json.loads(request.body)
        target_countries = data.get('countries', [])
        analysis_year = data.get('year', 2023)
        # Requested metric list from the front end (currently informational only)
        comparison_metrics = data.get('metrics', ['total_water_usage', 'per_capita_usage', 'efficiency_ranking'])
        country_filter = ", ".join(f"'{c}'" for c in target_countries)
        base_df = spark.sql(
            f"SELECT * FROM global_water_usage "
            f"WHERE year = {analysis_year} AND country_name IN ({country_filter})"
        )
        # Aggregate per-country totals and averages with Spark SQL functions
        country_stats = base_df.groupBy("country_name").agg(
            F.sum("total_water_usage").alias("total_usage"),
            F.avg("per_capita_water_usage").alias("avg_per_capita"),
            F.avg("water_efficiency_index").alias("efficiency_score"),
            F.sum("agricultural_usage").alias("agri_total"),
            F.sum("industrial_usage").alias("industrial_total"),
            F.sum("domestic_usage").alias("domestic_total")
        ).collect()
        # Global averages for the same year serve as the comparison benchmark
        global_avg_df = spark.sql(
            f"SELECT AVG(total_water_usage) AS global_avg_total, "
            f"AVG(per_capita_water_usage) AS global_avg_per_capita, "
            f"AVG(water_efficiency_index) AS global_efficiency "
            f"FROM global_water_usage WHERE year = {analysis_year}"
        )
        global_benchmarks = global_avg_df.collect()[0]
        comparison_results = []
        for country_row in country_stats:
            country_name = country_row.country_name
            usage_ratio_to_global = float(country_row.total_usage) / float(global_benchmarks.global_avg_total) if global_benchmarks.global_avg_total > 0 else 0
            efficiency_ratio = float(country_row.efficiency_score) / float(global_benchmarks.global_efficiency) if global_benchmarks.global_efficiency > 0 else 0
            # Share of each sector in the country's total water usage, in percent
            sector_distribution = {
                'agricultural_pct': (float(country_row.agri_total) / float(country_row.total_usage)) * 100 if country_row.total_usage > 0 else 0,
                'industrial_pct': (float(country_row.industrial_total) / float(country_row.total_usage)) * 100 if country_row.total_usage > 0 else 0,
                'domestic_pct': (float(country_row.domestic_total) / float(country_row.total_usage)) * 100 if country_row.total_usage > 0 else 0
            }
            comparison_results.append({
                'country': country_name,
                'total_usage': float(country_row.total_usage),
                'per_capita_usage': float(country_row.avg_per_capita),
                'efficiency_score': float(country_row.efficiency_score),
                'usage_ratio_to_global': round(usage_ratio_to_global, 3),
                'efficiency_ratio_to_global': round(efficiency_ratio, 3),
                'sector_distribution': sector_distribution,
                'ranking_position': 0
            })
        # Rank countries by total usage (descending) and record their positions
        comparison_results.sort(key=lambda x: x['total_usage'], reverse=True)
        for idx, country_data in enumerate(comparison_results):
            country_data['ranking_position'] = idx + 1
        return JsonResponse({
            'status': 'success',
            'analysis_year': analysis_year,
            'countries_comparison': comparison_results,
            'global_benchmarks': {
                'avg_total_usage': float(global_benchmarks.global_avg_total),
                'avg_per_capita': float(global_benchmarks.global_avg_per_capita),
                'avg_efficiency': float(global_benchmarks.global_efficiency)
            }
        })
    return JsonResponse({'status': 'error', 'message': 'POST request required'}, status=405)
@csrf_exempt
def scarcity_attribution_analysis(request):
    if request.method == 'POST':
        data = json.loads(request.body)
        analysis_countries = data.get('countries', [])
        time_range = data.get('time_range', {'start_year': 2010, 'end_year': 2023})
        scarcity_threshold = data.get('scarcity_threshold', 1000)
        country_filter = ", ".join(f"'{c}'" for c in analysis_countries)
        df = spark.sql(
            f"SELECT country_name, year, total_water_usage, per_capita_water_usage, "
            f"water_availability, population, gdp_per_capita, rainfall_index, temperature_index "
            f"FROM global_water_usage "
            f"WHERE country_name IN ({country_filter}) "
            f"AND year BETWEEN {time_range['start_year']} AND {time_range['end_year']}"
        )
        # Derive a stress ratio and a categorical scarcity level for each record
        scarcity_df = df.withColumn(
            "water_stress_ratio", F.col("total_water_usage") / F.col("water_availability")
        ).withColumn(
            "scarcity_level",
            F.when(F.col("per_capita_water_usage") < scarcity_threshold, "High")
             .when(F.col("per_capita_water_usage") < scarcity_threshold * 2, "Medium")
             .otherwise("Low")
        )
        pandas_df = scarcity_df.toPandas()
        correlation_analysis = pandas_df[['total_water_usage', 'water_availability', 'population',
                                          'gdp_per_capita', 'rainfall_index', 'temperature_index']].corr()
        attribution_results = []
        for country in analysis_countries:
            # Sort by year so trend fits and the "current" record are chronological
            country_data = pandas_df[pandas_df['country_name'] == country].sort_values('year')
            if len(country_data) > 0:
                # Linear trend slopes and a rainfall/availability correlation, cast to
                # plain Python floats so they are JSON-serializable
                trend_analysis = {
                    'usage_trend': float(np.polyfit(country_data['year'], country_data['total_water_usage'], 1)[0]),
                    'availability_trend': float(np.polyfit(country_data['year'], country_data['water_availability'], 1)[0]),
                    'population_growth': float(np.polyfit(country_data['year'], country_data['population'], 1)[0]),
                    'climate_impact': float(np.corrcoef(country_data['rainfall_index'], country_data['water_availability'])[0, 1])
                }
                scarcity_years = country_data[country_data['scarcity_level'] == 'High']['year'].tolist()
                risk_factors = []
                if trend_analysis['usage_trend'] > 0 and trend_analysis['availability_trend'] < 0:
                    risk_factors.append("increasing_demand_decreasing_supply")
                if trend_analysis['population_growth'] > 50000:
                    risk_factors.append("rapid_population_growth")
                if abs(trend_analysis['climate_impact']) > 0.6:
                    risk_factors.append("climate_variability")
                # Share of years classified as high scarcity, expressed as a percentage
                attribution_score = len(scarcity_years) / len(country_data) * 100
                attribution_results.append({
                    'country': country,
                    'attribution_score': round(attribution_score, 2),
                    'primary_risk_factors': risk_factors,
                    'trend_indicators': trend_analysis,
                    'scarcity_years': scarcity_years,
                    'current_status': country_data.iloc[-1]['scarcity_level']
                })
        return JsonResponse({
            'status': 'success',
            'attribution_analysis': attribution_results,
            'correlation_matrix': correlation_analysis.to_dict(),
            'analysis_summary': {
                'total_countries_analyzed': len(attribution_results),
                'high_risk_countries': len([r for r in attribution_results if r['attribution_score'] > 30]),
                'average_attribution_score': float(np.mean([r['attribution_score'] for r in attribution_results])) if attribution_results else 0
            }
        })
    return JsonResponse({'status': 'error', 'message': 'POST request required'}, status=405)
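For the Vue + ECharts front end to reach these three endpoints, they still need to be registered in Django's URL configuration. Below is a minimal routing sketch; the module path and URL patterns are hypothetical and should be adapted to the project's actual app layout.

# urls.py -- hypothetical wiring of the analysis endpoints defined above
from django.urls import path
from . import views

urlpatterns = [
    path('api/analysis/clustering/', views.multi_dimensional_clustering_analysis),
    path('api/analysis/horizontal/', views.multi_country_horizontal_analysis),
    path('api/analysis/scarcity/', views.scarcity_attribution_analysis),
]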
Documentation Showcase of the Big Data-Based Global Water Usage Data Analysis System