1. About the Author
💖💖 Author: 计算机编程果茶熊 💙💙 About me: I spent years in computer science training as a programming instructor, and I still enjoy teaching. My strengths cover Java, WeChat Mini Programs, Python, Golang, Android, and several other IT areas. I take on custom project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know some techniques for lowering plagiarism-check similarity scores. I like sharing solutions to problems I run into during development and trading notes on technology, so feel free to bring me your code questions! 💛💛 A word of thanks: I appreciate everyone's follows and support! 💜💜 Web practice projects | Android/Mini Program practice projects | Big data practice projects | CS graduation project topics 💕💕 Contact 计算机编程果茶熊 at the end of this post to get the source code
2. System Overview
Big data framework: Hadoop + Spark (Hive supported via custom modification)
Development languages: Java + Python (both versions available)
Database: MySQL
Back-end frameworks: SpringBoot (Spring + SpringMVC + MyBatis) and Django (both versions available)
Front end: Vue + Echarts + HTML + CSS + JavaScript + jQuery
The New Energy Charging Safety and Thermal Runaway Early Warning Analysis System is a big-data platform for monitoring the charging safety of new energy vehicles. It uses the Hadoop + Spark distributed computing stack as its core data-processing engine: HDFS stores the large volumes of telemetry produced by charging equipment, and Spark SQL handles large-scale analysis of that data. The back end is built on Django, while the front end uses Vue + ElementUI for the interactive interface and Echarts for data visualization. The core modules cover real-time charging-safety monitoring, intelligent thermal-runaway risk warning, battery health assessment, temperature anomaly detection, equipment operating-status tracking, and time-series safety trend analysis. Python, together with scientific-computing libraries such as Pandas and NumPy, models multi-dimensional charging parameters (voltage, current, temperature) to raise early warnings for charging anomalies. A visualization dashboard presents the analysis results as intuitive charts, helping operations staff keep track of charging-facility safety, lower the probability of thermal-runaway accidents, and keep the charging process safe and reliable.
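As a minimal sketch of that Pandas/NumPy analysis (the column names, window size, and threshold below are illustrative assumptions, not the project's actual parameters), a rolling z-score over each device's temperature stream is one simple way to flag the temperature anomalies described above:

import pandas as pd
import numpy as np

def flag_temperature_anomalies(df, window=10, z_threshold=3.0):
    # df is assumed to hold device_id, timestamp, and temperature columns
    df = df.sort_values(["device_id", "timestamp"]).copy()
    grouped = df.groupby("device_id")["temperature"]
    rolling_mean = grouped.transform(lambda s: s.rolling(window, min_periods=3).mean())
    rolling_std = grouped.transform(lambda s: s.rolling(window, min_periods=3).std())
    # Score each reading against the device's own recent baseline
    df["temp_zscore"] = (df["temperature"] - rolling_mean) / rolling_std.replace(0, np.nan)
    df["is_anomaly"] = df["temp_zscore"].abs() > z_threshold
    return df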
3. Video Walkthrough
4. Feature Showcase
5. Selected Code
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, avg, stddev, window, when, lag, abs as spark_abs
from pyspark.sql.window import Window
import pandas as pd
import numpy as np
from django.http import JsonResponse
from datetime import datetime, timedelta
spark = (SparkSession.builder.appName("ChargingSecurityAnalysis")
    .config("spark.sql.shuffle.partitions", "200")
    .config("spark.executor.memory", "4g")
    .getOrCreate())
def thermal_runaway_risk_analysis(request):
    # Load charging records from MySQL over JDBC
    charging_data = (spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/charging_db")
        .option("dbtable", "charging_records").option("user", "root").option("password", "password").load())
    charging_data = charging_data.filter(col("status") == "charging")
    # Sliding window per device: the current reading plus the 10 preceding ones
    window_spec = Window.partitionBy("device_id").orderBy("timestamp").rowsBetween(-10, 0)
    charging_data = charging_data.withColumn("temp_avg", avg("temperature").over(window_spec))
    charging_data = charging_data.withColumn("temp_stddev", stddev("temperature").over(window_spec))
    charging_data = charging_data.withColumn("voltage_avg", avg("voltage").over(window_spec))
    charging_data = charging_data.withColumn("current_avg", avg("current").over(window_spec))
    # Temperature change rate in °C per second, assuming one reading per minute
    lag_window = Window.partitionBy("device_id").orderBy("timestamp")
    charging_data = charging_data.withColumn("temp_change_rate",
        (col("temperature") - lag("temperature", 1).over(lag_window)) / 60)
    # Rule-based risk scoring: hot, fast-rising temperatures score highest
    risk_data = charging_data.withColumn("risk_score",
        when((col("temperature") > 60) & (col("temp_change_rate") > 0.5), 90)
        .when((col("temperature") > 55) & (col("temp_stddev") > 3), 75)
        .when((col("voltage_avg") > 420) & (col("current_avg") > 150), 70)
        .when(col("temperature") > 50, 50).otherwise(20))
    # Aggregate per device, keeping only devices past the warning threshold
    high_risk_devices = (risk_data.filter(col("risk_score") >= 70).groupBy("device_id")
        .agg({"risk_score": "max", "temperature": "max", "voltage_avg": "avg"})
        .withColumnRenamed("max(risk_score)", "max_risk_score")
        .withColumnRenamed("max(temperature)", "max_temp")
        .withColumnRenamed("avg(voltage_avg)", "avg_voltage"))
    result_df = high_risk_devices.toPandas()
    risk_analysis_result = []
    for _, row in result_df.iterrows():
        risk_level = "high" if row["max_risk_score"] >= 90 else "medium-high"
        risk_analysis_result.append({"device_id": row["device_id"], "risk_score": int(row["max_risk_score"]),
            "risk_level": risk_level, "max_temperature": round(float(row["max_temp"]), 2),
            "avg_voltage": round(float(row["avg_voltage"]), 2),
            "warning_msg": f"Device {row['device_id']} shows thermal runaway risk; inspect immediately"})
    return JsonResponse({"code": 200, "data": risk_analysis_result, "total": len(risk_analysis_result)})
def battery_health_status_analysis(request):
    # Load battery status history from MySQL over JDBC
    battery_data = (spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/charging_db")
        .option("dbtable", "battery_status").option("user", "root").option("password", "password").load())
    battery_data = battery_data.filter(col("battery_id").isNotNull())
    # Capacity decay as a percentage of rated capacity
    battery_data = battery_data.withColumn("capacity_decay_rate", (1 - col("current_capacity") / col("rated_capacity")) * 100)
    battery_data = battery_data.withColumn("voltage_diff", spark_abs(col("actual_voltage") - col("standard_voltage")))
    # Approximate internal resistance from voltage deviation over current
    battery_data = battery_data.withColumn("internal_resistance", col("voltage_diff") / col("current"))
    battery_data = battery_data.withColumn("charge_cycles", col("total_charge_count"))
    # Weighted health score: capacity decay 60%, resistance growth 30%, cycle count 10%
    health_score_data = battery_data.withColumn("health_score",
        100 - col("capacity_decay_rate") * 0.6
        - (col("internal_resistance") - 50) / 10 * 0.3
        - col("charge_cycles") / 20 * 0.1)
    # Clamp the score to the 0-100 range
    health_score_data = health_score_data.withColumn("health_score",
        when(col("health_score") > 100, 100).when(col("health_score") < 0, 0).otherwise(col("health_score")))
    battery_health_stats = (health_score_data.groupBy("battery_id")
        .agg({"health_score": "avg", "capacity_decay_rate": "avg", "internal_resistance": "avg", "charge_cycles": "max"})
        .withColumnRenamed("avg(health_score)", "avg_health_score")
        .withColumnRenamed("avg(capacity_decay_rate)", "avg_decay_rate")
        .withColumnRenamed("avg(internal_resistance)", "avg_resistance")
        .withColumnRenamed("max(charge_cycles)", "total_cycles"))
    result_pd = battery_health_stats.toPandas()
    health_result_list = []
    for _, record in result_pd.iterrows():
        score = record["avg_health_score"]
        health_level = "excellent" if score >= 85 else "good" if score >= 70 else "fair" if score >= 50 else "poor"
        suggestion = ("Battery is in good condition" if score >= 70
            else "Battery maintenance check recommended" if score >= 50
            else "Battery replacement recommended")
        health_result_list.append({"battery_id": record["battery_id"], "health_score": round(float(score), 2),
            "health_level": health_level, "capacity_decay": round(float(record["avg_decay_rate"]), 2),
            "internal_resistance": round(float(record["avg_resistance"]), 2),
            "charge_cycles": int(record["total_cycles"]), "maintenance_suggestion": suggestion})
    return JsonResponse({"code": 200, "message": "Battery health analysis complete", "data": health_result_list, "count": len(health_result_list)})
def time_series_security_trend_analysis(request):
    # Analysis window, defaulting to the last 7 days
    time_range = request.GET.get("days", 7)
    end_date = datetime.now()
    start_date = end_date - timedelta(days=int(time_range))
    security_data = (spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3306/charging_db")
        .option("dbtable", "security_events").option("user", "root").option("password", "password").load())
    # Compare the upper bound against a full timestamp so today's events are included
    security_data = security_data.filter((col("event_time") >= start_date.strftime("%Y-%m-%d"))
        & (col("event_time") <= end_date.strftime("%Y-%m-%d %H:%M:%S")))
    # Bucket events into hourly windows per event type
    hourly_stats = security_data.groupBy(window("event_time", "1 hour"), "event_type").count().withColumnRenamed("count", "event_count")
    hourly_stats = hourly_stats.withColumn("hour_timestamp", col("window.start")).drop("window")
    # 24-hour moving average smooths out hourly spikes
    window_trend = Window.partitionBy("event_type").orderBy("hour_timestamp").rowsBetween(-24, 0)
    trend_data = hourly_stats.withColumn("moving_avg", avg("event_count").over(window_trend))
    trend_data = trend_data.withColumn("trend_indicator",
        when(col("event_count") > col("moving_avg") * 1.5, "rising")
        .when(col("event_count") < col("moving_avg") * 0.5, "falling").otherwise("stable"))
    # Weight each event type by severity to rank the riskiest categories
    severity_mapping = {"temperature_alarm": 3, "voltage_alarm": 4, "current_alarm": 3, "connection_error": 2, "system_error": 1}
    event_severity = security_data.groupBy("event_type").count().toPandas()
    event_severity["severity_score"] = event_severity["event_type"].map(severity_mapping).fillna(1) * event_severity["count"]
    top_risk_events = event_severity.nlargest(5, "severity_score")
    trend_pd = trend_data.toPandas()
    trend_pd["hour_timestamp"] = pd.to_datetime(trend_pd["hour_timestamp"])
    trend_pd = trend_pd.sort_values("hour_timestamp")
    time_series_result = []
    for event_type in trend_pd["event_type"].unique():
        # Keep the most recent 24 hourly buckets for charting
        event_trend = trend_pd[trend_pd["event_type"] == event_type].tail(24)
        trend_array = event_trend["event_count"].values
        moving_avg_array = event_trend["moving_avg"].values
        time_labels = event_trend["hour_timestamp"].dt.strftime("%Y-%m-%d %H:%M").tolist()
        if len(trend_array) > 1 and trend_array[-1] > np.mean(trend_array):
            trend_direction = "rising"
        elif len(trend_array) > 1 and trend_array[-1] < np.mean(trend_array):
            trend_direction = "falling"
        else:
            trend_direction = "stable"
        time_series_result.append({"event_type": event_type, "time_labels": time_labels,
            "event_counts": trend_array.tolist(), "moving_average": moving_avg_array.tolist(),
            "trend_direction": trend_direction, "total_events": int(np.sum(trend_array)),
            "peak_value": int(np.max(trend_array)), "avg_value": round(float(np.mean(trend_array)), 2)})
    top_risks = [{"event_type": row["event_type"], "severity_score": round(float(row["severity_score"]), 2),
        "occurrence_count": int(row["count"])} for _, row in top_risk_events.iterrows()]
    return JsonResponse({"code": 200, "message": "Time-series trend analysis complete",
        "trend_data": time_series_result, "top_risk_events": top_risks,
        "analysis_period": f"{start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}"})
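Since all three views return JsonResponse objects, exposing them over HTTP only needs Django URL routing. Below is a minimal sketch of that wiring; the module path analysis.views and the URL prefixes are hypothetical, chosen for illustration rather than taken from the project:

# urls.py (hypothetical wiring for the three analysis endpoints above)
from django.urls import path
from analysis import views  # assumed module layout

urlpatterns = [
    path("api/risk/thermal-runaway/", views.thermal_runaway_risk_analysis),
    path("api/battery/health/", views.battery_health_status_analysis),
    path("api/security/trend/", views.time_series_security_trend_analysis),
]

The front end can then request, for example, /api/security/trend/?days=7 and feed the returned trend_data directly into an Echarts line chart.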
6. Documentation Samples
7. END
💕💕 Contact 计算机编程果茶熊 at the end of this post to get the source code