💖💖作者:计算机编程小咖 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
@TOC
基于大数据的健康保险数据可视化分析系统介绍
基于大数据的健康保险数据可视化分析系统是一个面向大数据分析与可视化场景的综合性应用平台,该系统采用Hadoop+Spark大数据技术架构作为底层数据处理引擎,结合Python语言的Pandas、NumPy数据分析库以及Java语言的Spring Boot开发框架(亦支持Python的Django框架版本),实现了对海量健康保险数据的高效存储、分布式计算与深度挖掘。系统前端采用Vue.js框架搭配ElementUI组件库和Echarts可视化图表库,通过HTML、CSS、JavaScript、jQuery等前端技术构建了响应式交互界面,后端基于Spring、SpringMVC、MyBatis技术栈实现业务逻辑处理,数据持久化采用MySQL关系型数据库。功能模块涵盖系统首页、个人中心、修改密码、个人信息管理、用户管理、健康保险数据管理等基础功能,核心分析模块包括大屏可视化分析、综合聚类分析、医疗费用关联分析、投保人画像分析以及保费特征分析,通过Hadoop HDFS分布式文件系统存储健康保险原始数据,利用Spark SQL进行结构化数据查询与转换,借助Spark的分布式计算能力完成聚类算法、关联规则挖掘等数据分析任务,最终通过Echarts将分析结果以柱状图、饼图、折线图、热力图等多维度图表形式进行直观展示,为健康保险行业的数据分析与决策支持提供了完整的大数据技术解决方案。
基于大数据的健康保险数据可视化分析系统演示视频
基于大数据的健康保险数据可视化分析系统演示图片
基于大数据的健康保险数据可视化分析系统代码展示
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.ml.clustering.KMeans;
import org.apache.spark.ml.clustering.KMeansModel;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.functions;
import org.springframework.stereotype.Service;
import java.util.*;
@Service
public class HealthInsuranceAnalysisService {

    // Single shared Spark session for every analysis in this service.
    // NOTE(review): master("local[*]") and the warehouse dir are deployment-specific
    // hard-codes — externalize them before running outside a local environment.
    private SparkSession spark = SparkSession.builder()
            .appName("HealthInsuranceAnalysis")
            .master("local[*]")
            .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
            .getOrCreate();

    /**
     * Loads the full {@code insurance_data} table from MySQL over JDBC.
     * All three public analyses previously duplicated this read inline.
     * NOTE(review): credentials are hard-coded — move to external configuration.
     */
    private Dataset<Row> loadInsuranceData() {
        return spark.read().format("jdbc")
                .option("url", "jdbc:mysql://localhost:3306/health_insurance")
                .option("dbtable", "insurance_data")
                .option("user", "root")
                .option("password", "root")
                .load();
    }

    /** Rounds {@code value} to two decimal places (amounts and averages). */
    private static double round2(double value) {
        return Math.round(value * 100.0) / 100.0;
    }

    /** Rounds {@code value} to three decimal places (correlation coefficients). */
    private static double round3(double value) {
        return Math.round(value * 1000.0) / 1000.0;
    }

    /**
     * Runs k-means clustering (k=5) over five numeric policyholder features and
     * returns per-cluster sizes plus per-cluster feature averages for chart rendering.
     *
     * @return map with key "clusterData" (list of per-cluster stat maps) and
     *         "totalClusters" (number of clusters found)
     */
    public Map<String, Object> comprehensiveClusteringAnalysis() {
        Dataset<Row> insuranceData = loadInsuranceData();
        Dataset<Row> selectedFeatures = insuranceData.select(
                "age", "bmi", "annual_premium", "medical_history_score", "claim_frequency");
        // Nulls are replaced with 0 so VectorAssembler does not fail on missing values.
        Dataset<Row> cleanedData = selectedFeatures.na().fill(0);
        VectorAssembler assembler = new VectorAssembler()
                .setInputCols(new String[]{"age", "bmi", "annual_premium", "medical_history_score", "claim_frequency"})
                .setOutputCol("features");
        Dataset<Row> featureVector = assembler.transform(cleanedData);
        // Fixed seed keeps cluster assignments reproducible between runs.
        KMeans kmeans = new KMeans().setK(5).setSeed(1L)
                .setFeaturesCol("features").setPredictionCol("cluster");
        KMeansModel model = kmeans.fit(featureVector);
        Dataset<Row> predictions = model.transform(featureVector);
        Dataset<Row> clusterStats = predictions.groupBy("cluster").agg(
                functions.count("cluster").alias("count"),
                functions.avg("age").alias("avg_age"),
                functions.avg("bmi").alias("avg_bmi"),
                functions.avg("annual_premium").alias("avg_premium"),
                functions.avg("medical_history_score").alias("avg_health_score"),
                functions.avg("claim_frequency").alias("avg_claim_freq"));
        List<Row> clusterResults = clusterStats.collectAsList();
        Map<String, Object> resultMap = new HashMap<>();
        List<Map<String, Object>> clusterList = new ArrayList<>();
        for (Row row : clusterResults) {
            Map<String, Object> clusterInfo = new HashMap<>();
            clusterInfo.put("clusterId", row.getAs("cluster"));
            clusterInfo.put("personCount", row.getAs("count"));
            clusterInfo.put("averageAge", round2(row.getDouble(row.fieldIndex("avg_age"))));
            clusterInfo.put("averageBmi", round2(row.getDouble(row.fieldIndex("avg_bmi"))));
            clusterInfo.put("averagePremium", round2(row.getDouble(row.fieldIndex("avg_premium"))));
            clusterInfo.put("averageHealthScore", round2(row.getDouble(row.fieldIndex("avg_health_score"))));
            clusterInfo.put("averageClaimFreq", round2(row.getDouble(row.fieldIndex("avg_claim_freq"))));
            clusterList.add(clusterInfo);
        }
        resultMap.put("clusterData", clusterList);
        resultMap.put("totalClusters", clusterResults.size());
        return resultMap;
    }

    /**
     * Computes Pearson correlations between medical cost and six candidate drivers,
     * plus average cost broken down by 10-year age band and by chronic-disease count.
     *
     * @return map with keys "correlationFactors" (name → rounded coefficient),
     *         "ageGroupAnalysis" and "diseaseCountAnalysis" (lists of stat maps)
     */
    public Map<String, Object> medicalCostAssociationAnalysis() {
        Dataset<Row> costData = loadInsuranceData();
        Dataset<Row> costFeatures = costData.select(
                "medical_cost", "hospitalization_days", "surgery_count",
                "chronic_disease_count", "prescription_count", "age", "bmi");
        // Rows with non-positive cost carry no signal for this analysis.
        Dataset<Row> validCostData = costFeatures.filter(functions.col("medical_cost").gt(0));
        // BUG FIX: DataFrameStatFunctions.corr(col1, col2) already returns a double;
        // the original assigned it to Dataset<Row> and called first().getDouble(0),
        // which does not compile. Same misuse removed from all six correlations.
        double hospCorr = validCostData.stat().corr("medical_cost", "hospitalization_days");
        double surgeryCorr = validCostData.stat().corr("medical_cost", "surgery_count");
        double chronicCorr = validCostData.stat().corr("medical_cost", "chronic_disease_count");
        double prescriptionCorr = validCostData.stat().corr("medical_cost", "prescription_count");
        double ageCorr = validCostData.stat().corr("medical_cost", "age");
        double bmiCorr = validCostData.stat().corr("medical_cost", "bmi");
        Dataset<Row> costByAge = validCostData
                .groupBy(functions.floor(functions.col("age").divide(10)).multiply(10).alias("age_group"))
                .agg(functions.avg("medical_cost").alias("avg_cost"),
                     functions.count("medical_cost").alias("count"));
        List<Row> ageGroupCost = costByAge.orderBy("age_group").collectAsList();
        Dataset<Row> costByDisease = validCostData.groupBy("chronic_disease_count").agg(
                functions.avg("medical_cost").alias("avg_cost"),
                functions.max("medical_cost").alias("max_cost"),
                functions.min("medical_cost").alias("min_cost"),
                functions.count("medical_cost").alias("count"));
        List<Row> diseaseCost = costByDisease.orderBy("chronic_disease_count").collectAsList();
        Map<String, Object> associationMap = new HashMap<>();
        Map<String, Double> correlationMap = new HashMap<>();
        correlationMap.put("hospitalizationCorrelation", round3(hospCorr));
        correlationMap.put("surgeryCorrelation", round3(surgeryCorr));
        correlationMap.put("chronicDiseaseCorrelation", round3(chronicCorr));
        correlationMap.put("prescriptionCorrelation", round3(prescriptionCorr));
        correlationMap.put("ageCorrelation", round3(ageCorr));
        correlationMap.put("bmiCorrelation", round3(bmiCorr));
        List<Map<String, Object>> ageGroupList = new ArrayList<>();
        for (Row row : ageGroupCost) {
            Map<String, Object> ageGroup = new HashMap<>();
            // BUG FIX: floor() produces a LongType column, so getInt(0) would throw
            // ClassCastException at runtime; read it as long (as the income grouping does).
            ageGroup.put("ageRange", row.getAs("age_group") + "-" + (row.getLong(0) + 9));
            ageGroup.put("averageCost", round2(row.getDouble(row.fieldIndex("avg_cost"))));
            ageGroup.put("sampleCount", row.getAs("count"));
            ageGroupList.add(ageGroup);
        }
        List<Map<String, Object>> diseaseCostList = new ArrayList<>();
        for (Row row : diseaseCost) {
            Map<String, Object> diseaseInfo = new HashMap<>();
            diseaseInfo.put("diseaseCount", row.getAs("chronic_disease_count"));
            diseaseInfo.put("averageCost", round2(row.getDouble(row.fieldIndex("avg_cost"))));
            diseaseInfo.put("maxCost", round2(row.getDouble(row.fieldIndex("max_cost"))));
            diseaseInfo.put("minCost", round2(row.getDouble(row.fieldIndex("min_cost"))));
            diseaseInfo.put("sampleCount", row.getAs("count"));
            diseaseCostList.add(diseaseInfo);
        }
        associationMap.put("correlationFactors", correlationMap);
        associationMap.put("ageGroupAnalysis", ageGroupList);
        associationMap.put("diseaseCountAnalysis", diseaseCostList);
        return associationMap;
    }

    /**
     * Builds a policyholder portrait: distributions by age band, gender, region,
     * occupation, BMI category, smoking status, and 50k income band, each with an
     * associated average (premium, claim amount, health score, or claim frequency).
     *
     * @return map with one "...Distribution" entry per dimension plus
     *         "totalPolicyholders" (row count of the source table)
     */
    public Map<String, Object> policyholderPortraitAnalysis() {
        Dataset<Row> policyholderData = loadInsuranceData();
        Dataset<Row> ageDistribution = policyholderData
                .groupBy(functions.floor(functions.col("age").divide(10)).multiply(10).alias("age_group"))
                .agg(functions.count("*").alias("count"));
        List<Row> ageList = ageDistribution.orderBy("age_group").collectAsList();
        Dataset<Row> genderDistribution = policyholderData.groupBy("gender").agg(
                functions.count("*").alias("count"),
                functions.avg("annual_premium").alias("avg_premium"));
        List<Row> genderList = genderDistribution.collectAsList();
        Dataset<Row> regionDistribution = policyholderData.groupBy("region").agg(
                functions.count("*").alias("count"),
                functions.avg("claim_amount").alias("avg_claim"));
        List<Row> regionList = regionDistribution.orderBy(functions.desc("count")).collectAsList();
        Dataset<Row> occupationDistribution = policyholderData.groupBy("occupation").agg(
                functions.count("*").alias("count"),
                functions.avg("medical_history_score").alias("avg_health_score"));
        List<Row> occupationList = occupationDistribution.orderBy(functions.desc("count")).collectAsList();
        // WHO-style BMI buckets; labels are consumed verbatim by the front-end charts.
        Dataset<Row> bmiCategoryData = policyholderData.withColumn("bmi_category",
                functions.when(functions.col("bmi").lt(18.5), "偏瘦")
                        .when(functions.col("bmi").between(18.5, 24), "正常")
                        .when(functions.col("bmi").between(24, 28), "偏胖")
                        .otherwise("肥胖"));
        Dataset<Row> bmiDistribution = bmiCategoryData.groupBy("bmi_category").agg(
                functions.count("*").alias("count"),
                functions.avg("annual_premium").alias("avg_premium"));
        List<Row> bmiList = bmiDistribution.collectAsList();
        Dataset<Row> smokingDistribution = policyholderData.groupBy("smoking_status").agg(
                functions.count("*").alias("count"),
                functions.avg("claim_frequency").alias("avg_claim_freq"));
        List<Row> smokingList = smokingDistribution.collectAsList();
        Dataset<Row> incomeDistribution = policyholderData
                .groupBy(functions.floor(functions.col("annual_income").divide(50000)).multiply(50000).alias("income_group"))
                .agg(functions.count("*").alias("count"),
                     functions.avg("annual_premium").alias("avg_premium"));
        List<Row> incomeList = incomeDistribution.orderBy("income_group").collectAsList();
        Map<String, Object> portraitMap = new HashMap<>();
        List<Map<String, Object>> agePortrait = new ArrayList<>();
        for (Row row : ageList) {
            Map<String, Object> ageInfo = new HashMap<>();
            // BUG FIX: age_group is LongType (result of floor()); the original getInt(0)
            // would throw ClassCastException — read it as long like the income band below.
            ageInfo.put("ageRange", row.getAs("age_group") + "-" + (row.getLong(0) + 9) + "岁");
            ageInfo.put("personCount", row.getAs("count"));
            agePortrait.add(ageInfo);
        }
        List<Map<String, Object>> genderPortrait = new ArrayList<>();
        for (Row row : genderList) {
            Map<String, Object> genderInfo = new HashMap<>();
            genderInfo.put("gender", row.getAs("gender"));
            genderInfo.put("personCount", row.getAs("count"));
            genderInfo.put("averagePremium", round2(row.getDouble(row.fieldIndex("avg_premium"))));
            genderPortrait.add(genderInfo);
        }
        List<Map<String, Object>> regionPortrait = new ArrayList<>();
        for (Row row : regionList) {
            Map<String, Object> regionInfo = new HashMap<>();
            regionInfo.put("region", row.getAs("region"));
            regionInfo.put("personCount", row.getAs("count"));
            regionInfo.put("averageClaim", round2(row.getDouble(row.fieldIndex("avg_claim"))));
            regionPortrait.add(regionInfo);
        }
        List<Map<String, Object>> occupationPortrait = new ArrayList<>();
        for (Row row : occupationList) {
            Map<String, Object> occupationInfo = new HashMap<>();
            occupationInfo.put("occupation", row.getAs("occupation"));
            occupationInfo.put("personCount", row.getAs("count"));
            occupationInfo.put("averageHealthScore", round2(row.getDouble(row.fieldIndex("avg_health_score"))));
            occupationPortrait.add(occupationInfo);
        }
        List<Map<String, Object>> bmiPortrait = new ArrayList<>();
        for (Row row : bmiList) {
            Map<String, Object> bmiInfo = new HashMap<>();
            bmiInfo.put("bmiCategory", row.getAs("bmi_category"));
            bmiInfo.put("personCount", row.getAs("count"));
            bmiInfo.put("averagePremium", round2(row.getDouble(row.fieldIndex("avg_premium"))));
            bmiPortrait.add(bmiInfo);
        }
        List<Map<String, Object>> smokingPortrait = new ArrayList<>();
        for (Row row : smokingList) {
            Map<String, Object> smokingInfo = new HashMap<>();
            smokingInfo.put("smokingStatus", row.getAs("smoking_status"));
            smokingInfo.put("personCount", row.getAs("count"));
            smokingInfo.put("averageClaimFreq", round2(row.getDouble(row.fieldIndex("avg_claim_freq"))));
            smokingPortrait.add(smokingInfo);
        }
        List<Map<String, Object>> incomePortrait = new ArrayList<>();
        for (Row row : incomeList) {
            Map<String, Object> incomeInfo = new HashMap<>();
            incomeInfo.put("incomeRange", row.getAs("income_group") + "-" + (row.getLong(0) + 49999) + "元");
            incomeInfo.put("personCount", row.getAs("count"));
            incomeInfo.put("averagePremium", round2(row.getDouble(row.fieldIndex("avg_premium"))));
            incomePortrait.add(incomeInfo);
        }
        portraitMap.put("ageDistribution", agePortrait);
        portraitMap.put("genderDistribution", genderPortrait);
        portraitMap.put("regionDistribution", regionPortrait);
        portraitMap.put("occupationDistribution", occupationPortrait);
        portraitMap.put("bmiDistribution", bmiPortrait);
        portraitMap.put("smokingDistribution", smokingPortrait);
        portraitMap.put("incomeDistribution", incomePortrait);
        portraitMap.put("totalPolicyholders", policyholderData.count());
        return portraitMap;
    }
}
基于大数据的健康保险数据可视化分析系统文档展示
💖💖作者:计算机编程小咖 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目