【数据分析】基于大数据的健康风险预测数据可视化分析系统 | 大数据毕设选题推荐 大数据实战项目 可视化大屏 Hadoop Spark Java

49 阅读 · 约 6 分钟

💖💖作者:计算机毕业设计江挽 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目

基于大数据的健康风险预测数据可视化分析系统介绍

基于大数据的健康风险预测数据可视化分析系统是一个集数据采集、分布式存储、智能分析与可视化展示于一体的综合性平台。系统采用Hadoop作为分布式存储基础,利用HDFS实现海量健康数据的可靠存储,通过Spark引擎进行高效的数据处理与分析计算。后端基于Spring Boot框架构建RESTful接口服务,整合Spark SQL实现复杂的健康数据查询与统计分析,结合Pandas和NumPy进行数据清洗、特征工程与风险评估建模。前端采用Vue框架搭配ElementUI组件库构建交互界面,运用Echarts图表库实现生命体征趋势图、风险画像雷达图、患者聚类散点图等多维度数据可视化展示。系统核心功能包括健康风险预测数据的录入与管理、基于生命体征指标的多维分析、用户风险画像的智能绘制、医疗资源使用情况的统计分析以及基于K-Means算法的患者聚类分组。通过大数据技术的应用,系统能够处理百万级健康记录,实现秒级响应的查询与分析,为健康管理决策提供数据支撑。

基于大数据的健康风险预测数据可视化分析系统演示视频

演示视频

基于大数据的健康风险预测数据可视化分析系统演示图片

在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述

基于大数据的健康风险预测数据可视化分析系统代码展示

import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.*;
/**
 * Health-risk analytics service backed by Spark SQL.
 *
 * <p>Loads per-user health records via {@link HealthDataMapper}, registers them as Spark
 * temporary views, and runs aggregate SQL plus rule-based risk scoring and a hand-rolled
 * K-Means clustering over per-patient feature vectors.
 *
 * <p>NOTE(review): the SparkSession below is built eagerly per service instance with
 * master "local[*]" and is never stopped — confirm this is intended outside of a demo
 * deployment (a shared, externally managed session is the usual pattern).
 */
@Service
public class HealthRiskAnalysisService {
    // Local-mode Spark session; warehouse dir points at the default Hive location.
    private SparkSession spark = SparkSession.builder().appName("HealthRiskPrediction").master("local[*]").config("spark.sql.warehouse.dir", "/user/hive/warehouse").getOrCreate();
    // Mapper supplying health records; declared elsewhere in the project.
    @Resource
    private HealthDataMapper healthDataMapper;
    /**
     * Aggregates one user's vital-sign records into overall averages, a 30-day daily
     * trend, and counts of out-of-range readings.
     *
     * <p>NOTE(review): AVG over an empty view yields NULL, so the positional
     * {@code getDouble} calls below would throw if the user has no records — confirm
     * callers guarantee at least one record exists.
     *
     * @param userId id of the user whose records are analyzed
     * @return map with keys "avgMetrics" (per-metric means), "trends" (daily series,
     *         newest first, at most 30 days) and "abnormalStats" (abnormal counts)
     */
    public Map<String, Object> analyzeVitalSigns(Long userId) {
        List<HealthData> dataList = healthDataMapper.selectByUserId(userId);
        // Lift the JDBC result into Spark so the aggregations below can run as SQL.
        Dataset<Row> dataset = spark.createDataFrame(dataList, HealthData.class);
        dataset.createOrReplaceTempView("vital_signs");
        Dataset<Row> avgMetrics = spark.sql("SELECT AVG(heartRate) as avgHeart, AVG(bloodPressureHigh) as avgBpHigh, AVG(bloodPressureLow) as avgBpLow, AVG(bloodSugar) as avgSugar, AVG(temperature) as avgTemp FROM vital_signs");
        Row metricsRow = avgMetrics.first();
        // Positional getters must match the SELECT column order above.
        double avgHeart = metricsRow.getDouble(0);
        double avgBpHigh = metricsRow.getDouble(1);
        double avgBpLow = metricsRow.getDouble(2);
        double avgSugar = metricsRow.getDouble(3);
        double avgTemp = metricsRow.getDouble(4);
        // Daily averages for the 30 most recent calendar days, newest first.
        Dataset<Row> trendData = spark.sql("SELECT DATE(checkTime) as date, AVG(heartRate) as dailyHeart, AVG(bloodPressureHigh) as dailyBp, AVG(bloodSugar) as dailySugar FROM vital_signs GROUP BY DATE(checkTime) ORDER BY date DESC LIMIT 30");
        List<Row> trendList = trendData.collectAsList();
        List<Map<String, Object>> trends = new ArrayList<>();
        for (Row row : trendList) {
            Map<String, Object> item = new HashMap<>();
            // NOTE(review): DATE() produces a date-typed column; getString on it may
            // throw a ClassCastException depending on the Spark version — verify.
            item.put("date", row.getString(0));
            item.put("heartRate", row.getDouble(1));
            item.put("bloodPressure", row.getDouble(2));
            item.put("bloodSugar", row.getDouble(3));
            trends.add(item);
        }
        // Abnormal-reading counts; thresholds: heart rate outside 60-100, systolic
        // above 140 or diastolic above 90, blood sugar above 6.1 (units assumed to be
        // bpm / mmHg / mmol/L — confirm against the data source).
        Dataset<Row> abnormalCount = spark.sql("SELECT COUNT(*) as total, SUM(CASE WHEN heartRate > 100 OR heartRate < 60 THEN 1 ELSE 0 END) as abnormalHeart, SUM(CASE WHEN bloodPressureHigh > 140 OR bloodPressureLow > 90 THEN 1 ELSE 0 END) as abnormalBp, SUM(CASE WHEN bloodSugar > 6.1 THEN 1 ELSE 0 END) as abnormalSugar FROM vital_signs");
        Row abnormalRow = abnormalCount.first();
        long totalRecords = abnormalRow.getLong(0);
        long abnormalHeart = abnormalRow.getLong(1);
        long abnormalBp = abnormalRow.getLong(2);
        long abnormalSugar = abnormalRow.getLong(3);
        Map<String, Object> result = new HashMap<>();
        result.put("avgMetrics", Map.of("heartRate", avgHeart, "bloodPressureHigh", avgBpHigh, "bloodPressureLow", avgBpLow, "bloodSugar", avgSugar, "temperature", avgTemp));
        result.put("trends", trends);
        result.put("abnormalStats", Map.of("total", totalRecords, "abnormalHeartRate", abnormalHeart, "abnormalBloodPressure", abnormalBp, "abnormalBloodSugar", abnormalSugar));
        return result;
    }
    /**
     * Builds a rule-based risk profile for one user: per-metric risk scores in [0,1]
     * from the calculate*Risk helpers, an overall score (unweighted mean of the five),
     * a coarse level label, and a 90-day daily risk history.
     *
     * <p>Level thresholds: overall &gt;= 0.7 → "高风险" (high), &gt;= 0.4 → "中风险"
     * (medium), else "低风险" (low).
     *
     * @param userId id of the user to profile
     * @return map with keys "riskProfile", "overallRisk", "riskLevel", "riskHistory"
     */
    public Map<String, Object> generateRiskProfile(Long userId) {
        List<HealthData> dataList = healthDataMapper.selectByUserId(userId);
        Dataset<Row> dataset = spark.createDataFrame(dataList, HealthData.class);
        dataset.createOrReplaceTempView("health_records");
        Dataset<Row> riskMetrics = spark.sql("SELECT AVG(heartRate) as avgHeart, AVG(bloodPressureHigh) as avgBpHigh, AVG(bloodPressureLow) as avgBpLow, AVG(bloodSugar) as avgSugar, AVG(bmi) as avgBmi, AVG(cholesterol) as avgChol FROM health_records");
        Row metricsRow = riskMetrics.first();
        // NOTE(review): same NULL-on-empty caveat as analyzeVitalSigns applies here.
        double heartRate = metricsRow.getDouble(0);
        double bpHigh = metricsRow.getDouble(1);
        double bpLow = metricsRow.getDouble(2);
        double bloodSugar = metricsRow.getDouble(3);
        double bmi = metricsRow.getDouble(4);
        double cholesterol = metricsRow.getDouble(5);
        double heartRisk = calculateHeartRisk(heartRate);
        double bpRisk = calculateBpRisk(bpHigh, bpLow);
        double sugarRisk = calculateSugarRisk(bloodSugar);
        double bmiRisk = calculateBmiRisk(bmi);
        double cholRisk = calculateCholesterolRisk(cholesterol);
        // Overall score: unweighted average of the five per-metric risks.
        double overallRisk = (heartRisk + bpRisk + sugarRisk + bmiRisk + cholRisk) / 5.0;
        String riskLevel = overallRisk >= 0.7 ? "高风险" : overallRisk >= 0.4 ? "中风险" : "低风险";
        // Daily risk history over the 90 most recent calendar days, newest first.
        Dataset<Row> historicalRisk = spark.sql("SELECT DATE(checkTime) as date, AVG(heartRate) as hr, AVG(bloodPressureHigh) as bp, AVG(bloodSugar) as bs FROM health_records GROUP BY DATE(checkTime) ORDER BY date DESC LIMIT 90");
        List<Row> historyList = historicalRisk.collectAsList();
        List<Map<String, Object>> riskHistory = new ArrayList<>();
        for (Row row : historyList) {
            double dailyHr = row.getDouble(1);
            double dailyBp = row.getDouble(2);
            double dailyBs = row.getDouble(3);
            // Diastolic pressure is not in the daily aggregate, so 0 is passed as
            // "low"; with low == 0 the bpRisk result depends only on the systolic value.
            double dailyRisk = (calculateHeartRisk(dailyHr) + calculateBpRisk(dailyBp, 0) + calculateSugarRisk(dailyBs)) / 3.0;
            Map<String, Object> item = new HashMap<>();
            // NOTE(review): getString on a DATE column — same caveat as above.
            item.put("date", row.getString(0));
            item.put("riskScore", dailyRisk);
            riskHistory.add(item);
        }
        Map<String, Object> result = new HashMap<>();
        result.put("riskProfile", Map.of("heartRisk", heartRisk, "bloodPressureRisk", bpRisk, "bloodSugarRisk", sugarRisk, "bmiRisk", bmiRisk, "cholesterolRisk", cholRisk));
        result.put("overallRisk", overallRisk);
        result.put("riskLevel", riskLevel);
        result.put("riskHistory", riskHistory);
        return result;
    }
    /**
     * Groups all patients into k=3 clusters with the in-process K-Means below, using
     * five per-patient features (mean heart rate, systolic BP, blood sugar, BMI, age)
     * scaled by {@link #normalizeFeatures(double[])}.
     *
     * <p>Note that the reported cluster centers are averages of the NORMALIZED feature
     * vectors, so their values are in normalized space, not raw clinical units.
     *
     * @return map with keys "clusterCount", "clusterAssignments" (userId → cluster id),
     *         "clusterSizes" and "clusterCenters"
     */
    public Map<String, Object> performPatientClustering() {
        List<HealthData> allData = healthDataMapper.selectAllLatest();
        Dataset<Row> dataset = spark.createDataFrame(allData, HealthData.class);
        dataset.createOrReplaceTempView("all_patients");
        // One feature row per patient: per-user means of the five clustering features.
        Dataset<Row> patientFeatures = spark.sql("SELECT userId, AVG(heartRate) as avgHeart, AVG(bloodPressureHigh) as avgBp, AVG(bloodSugar) as avgSugar, AVG(bmi) as avgBmi, AVG(age) as age FROM all_patients GROUP BY userId");
        List<Row> featureList = patientFeatures.collectAsList();
        Map<Long, double[]> patientVectors = new HashMap<>();
        for (Row row : featureList) {
            Long userId = row.getLong(0);
            double[] features = new double[]{row.getDouble(1), row.getDouble(2), row.getDouble(3), row.getDouble(4), row.getDouble(5)};
            double[] normalized = normalizeFeatures(features);
            patientVectors.put(userId, normalized);
        }
        // Cluster count is fixed at 3; parameterize if callers ever need to vary it.
        int k = 3;
        Map<Long, Integer> clusterAssignments = kMeansClustering(patientVectors, k);
        // Invert the assignment map: cluster id → member user ids.
        Map<Integer, List<Long>> clusters = new HashMap<>();
        for (Map.Entry<Long, Integer> entry : clusterAssignments.entrySet()) {
            clusters.computeIfAbsent(entry.getValue(), key -> new ArrayList<>()).add(entry.getKey());
        }
        // Recompute each cluster's center as the mean of its members' normalized vectors.
        Map<Integer, Map<String, Double>> clusterCenters = new HashMap<>();
        for (Map.Entry<Integer, List<Long>> entry : clusters.entrySet()) {
            int clusterId = entry.getKey();
            List<Long> userIds = entry.getValue();
            double sumHeart = 0, sumBp = 0, sumSugar = 0, sumBmi = 0, sumAge = 0;
            for (Long userId : userIds) {
                double[] features = patientVectors.get(userId);
                sumHeart += features[0];
                sumBp += features[1];
                sumSugar += features[2];
                sumBmi += features[3];
                sumAge += features[4];
            }
            // count >= 1: clusters only contains ids that appear in clusterAssignments.
            int count = userIds.size();
            Map<String, Double> center = new HashMap<>();
            center.put("heartRate", sumHeart / count);
            center.put("bloodPressure", sumBp / count);
            center.put("bloodSugar", sumSugar / count);
            center.put("bmi", sumBmi / count);
            center.put("age", sumAge / count);
            clusterCenters.put(clusterId, center);
        }
        Map<String, Object> result = new HashMap<>();
        result.put("clusterCount", k);
        result.put("clusterAssignments", clusterAssignments);
        result.put("clusterSizes", clusters.entrySet().stream().collect(HashMap::new, (map, e) -> map.put(e.getKey(), e.getValue().size()), HashMap::putAll));
        result.put("clusterCenters", clusterCenters);
        return result;
    }
    // Heart-rate risk: 0.8 outside 60-100, 0.5 outside 65-95, otherwise 0.2.
    private double calculateHeartRisk(double heartRate) {
        if (heartRate < 60 || heartRate > 100) return 0.8;
        if (heartRate < 65 || heartRate > 95) return 0.5;
        return 0.2;
    }
    // Blood-pressure risk from systolic (high) and diastolic (low) readings; note the
    // hypotension branch (high < 90) is only reached when neither hypertension rule fires.
    private double calculateBpRisk(double high, double low) {
        if (high > 140 || low > 90) return 0.9;
        if (high > 130 || low > 85) return 0.6;
        if (high < 90) return 0.4;
        return 0.2;
    }
    // Blood-sugar risk: high above 7.0, elevated above 6.1, low below 3.9, else 0.1.
    private double calculateSugarRisk(double bloodSugar) {
        if (bloodSugar > 7.0) return 0.9;
        if (bloodSugar > 6.1) return 0.6;
        if (bloodSugar < 3.9) return 0.5;
        return 0.1;
    }
    // BMI risk: obese above 28, overweight above 24, underweight below 18.5, else 0.1.
    private double calculateBmiRisk(double bmi) {
        if (bmi > 28) return 0.8;
        if (bmi > 24) return 0.5;
        if (bmi < 18.5) return 0.4;
        return 0.1;
    }
    // Cholesterol risk: 0.9 above 6.2, 0.6 above 5.7, otherwise 0.2.
    private double calculateCholesterolRisk(double cholesterol) {
        if (cholesterol > 6.2) return 0.9;
        if (cholesterol > 5.7) return 0.6;
        return 0.2;
    }
    // Scales each feature by a fixed reference maximum (heart 120, systolic 160,
    // sugar 10, BMI 35, age 100). Values above the reference are NOT clamped, so
    // results can exceed 1.0.
    private double[] normalizeFeatures(double[] features) {
        double[] normalized = new double[features.length];
        normalized[0] = features[0] / 120.0;
        normalized[1] = features[1] / 160.0;
        normalized[2] = features[2] / 10.0;
        normalized[3] = features[3] / 35.0;
        normalized[4] = features[4] / 100.0;
        return normalized;
    }
    /**
     * Plain Lloyd's K-Means over the given vectors.
     *
     * <p>Deterministic via a fixed seed (42). Runs exactly 100 iterations with no
     * convergence check. Initial centroids are sampled with replacement, so duplicate
     * starting centroids are possible; a cluster that loses all members keeps its
     * previous centroid.
     *
     * @param vectors userId → (normalized) feature vector
     * @param k number of clusters
     * @return userId → assigned cluster index in [0, k)
     */
    private Map<Long, Integer> kMeansClustering(Map<Long, double[]> vectors, int k) {
        List<Long> userIds = new ArrayList<>(vectors.keySet());
        List<double[]> centroids = new ArrayList<>();
        Random random = new Random(42);
        for (int i = 0; i < k; i++) {
            centroids.add(vectors.get(userIds.get(random.nextInt(userIds.size()))).clone());
        }
        Map<Long, Integer> assignments = new HashMap<>();
        for (int iter = 0; iter < 100; iter++) {
            // Assignment step: each point goes to its nearest centroid.
            for (Long userId : userIds) {
                double[] vector = vectors.get(userId);
                int closestCluster = 0;
                double minDistance = Double.MAX_VALUE;
                for (int i = 0; i < k; i++) {
                    double distance = euclideanDistance(vector, centroids.get(i));
                    if (distance < minDistance) {
                        minDistance = distance;
                        closestCluster = i;
                    }
                }
                assignments.put(userId, closestCluster);
            }
            // Update step: each centroid moves to the mean of its assigned points.
            List<double[]> newCentroids = new ArrayList<>();
            for (int i = 0; i < k; i++) {
                List<double[]> clusterVectors = new ArrayList<>();
                for (Map.Entry<Long, Integer> entry : assignments.entrySet()) {
                    if (entry.getValue() == i) {
                        clusterVectors.add(vectors.get(entry.getKey()));
                    }
                }
                if (clusterVectors.isEmpty()) {
                    // Empty cluster: retain the old centroid rather than re-seeding.
                    newCentroids.add(centroids.get(i));
                } else {
                    double[] newCentroid = new double[clusterVectors.get(0).length];
                    for (double[] vec : clusterVectors) {
                        for (int j = 0; j < vec.length; j++) {
                            newCentroid[j] += vec[j];
                        }
                    }
                    for (int j = 0; j < newCentroid.length; j++) {
                        newCentroid[j] /= clusterVectors.size();
                    }
                    newCentroids.add(newCentroid);
                }
            }
            centroids = newCentroids;
        }
        return assignments;
    }
    // Standard Euclidean (L2) distance; assumes a and b have the same length.
    private double euclideanDistance(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            sum += Math.pow(a[i] - b[i], 2);
        }
        return Math.sqrt(sum);
    }
}

基于大数据的健康风险预测数据可视化分析系统文档展示

在这里插入图片描述

💖💖作者:计算机毕业设计江挽 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目