💖💖Author: 计算机毕业设计江挽 💙💙About me: I have long worked in computer science training and teaching, and I genuinely enjoy it. My languages include Java, WeChat Mini Programs, Python, Golang, and Android; my projects cover big data, deep learning, websites, mini programs, Android, and algorithms. I regularly take on customized project development, code walkthroughs, thesis-defense coaching, and documentation writing, and I know some techniques for reducing similarity-check scores. I like sharing solutions to problems I run into during development and exchanging ideas about technology, so feel free to ask me any questions about code! 💛💛A word of thanks: thank you all for your attention and support! 💜💜 Website practical projects | Android/Mini Program practical projects | Big data practical projects | Deep learning practical projects
Introduction to the Big Data-Based Population Census Income Data Analysis System
The Population Census Income Data Analysis System is a comprehensive data analysis platform built on the Hadoop + Spark big data stack. It uses Python as the primary development language, Django as the back-end framework, and a Vue + ElementUI + Echarts combination on the front end, delivering in-depth, multi-dimensional analysis of census income data. Its core functionality is organized into five analysis modules: population structure characteristics, work-characteristic income analysis, returns-to-education differentials, marriage and family roles, and capital-gains analysis. Using Spark SQL together with data-processing tools such as Pandas and NumPy, the system performs efficient ETL and statistical analysis over large volumes of census records. The platform provides intuitive visual charts, supports multi-dimensional drill-down and interactive queries, and gives researchers and decision-makers a scientifically grounded data tool. It also includes user management features such as profile maintenance and password changes. The overall architecture follows a front-end/back-end separation design: business data is stored in MySQL, while the HDFS distributed file system provides reliable, efficient storage and access for large-scale data.
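Because each analysis module is exposed as a plain Django view that returns JSON, connecting the Vue + Echarts front end to the back end is mostly a matter of URL routing. The snippet below is only a minimal routing sketch under assumed names: the module path population_analysis.views and the api/analysis/ URL prefix are illustrative, not taken from the original project.
# urls.py -- minimal routing sketch (module path and URL prefix are assumptions)
from django.urls import path
# The three analysis views shown in the code section below; "population_analysis.views"
# is a hypothetical module path that depends on the actual project layout.
from population_analysis.views import (
    population_structure_analysis,
    work_income_analysis,
    education_return_analysis,
)
urlpatterns = [
    # Each endpoint returns JSON that the Vue + Echarts front end can render directly.
    path("api/analysis/population-structure/", population_structure_analysis),
    path("api/analysis/work-income/", work_income_analysis),
    path("api/analysis/education-return/", education_return_analysis),
]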
Demo Video of the Big Data-Based Population Census Income Data Analysis System
Demo Screenshots of the Big Data-Based Population Census Income Data Analysis System
Code Showcase of the Big Data-Based Population Census Income Data Analysis System
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, avg, count, sum, desc, when
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
import pandas as pd
import numpy as np
from django.http import JsonResponse
import mysql.connector

# One shared SparkSession for all analysis views, with adaptive query execution enabled.
spark = SparkSession.builder \
    .appName("PopulationIncomeAnalysis") \
    .config("spark.sql.adaptive.enabled", "true") \
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
    .getOrCreate()
def population_structure_analysis(request):
    # Pull the raw census records needed for the demographic analysis from MySQL.
    connection = mysql.connector.connect(host='localhost', database='population_db',
                                         user='root', password='password')
    cursor = connection.cursor()
    cursor.execute("SELECT age, gender, education_level, marital_status, income "
                   "FROM population_data WHERE income > 0")
    raw_data = cursor.fetchall()
    cursor.close()
    connection.close()
    # Explicit schema so Spark does not have to infer column types from the raw tuples.
    schema = StructType([
        StructField("age", IntegerType(), True),
        StructField("gender", StringType(), True),
        StructField("education_level", StringType(), True),
        StructField("marital_status", StringType(), True),
        StructField("income", DoubleType(), True)
    ])
    df = spark.createDataFrame(raw_data, schema)
    # Keep only working-age records with a valid income value.
    df = df.filter(col("income").isNotNull() & col("age").between(18, 65))
    # Bucket ages into five groups for the structure analysis.
    age_groups = df.withColumn("age_group",
        when(col("age") < 25, "18-24")
        .when(col("age") < 35, "25-34")
        .when(col("age") < 45, "35-44")
        .when(col("age") < 55, "45-54")
        .otherwise("55-65"))
    # Average / total income and population counts by gender and age group.
    gender_income_analysis = age_groups.groupBy("gender", "age_group").agg(
        avg("income").alias("avg_income"),
        count("*").alias("population_count"),
        sum("income").alias("total_income")
    ).orderBy("gender", "age_group")
    # Income distribution by education level and gender.
    education_distribution = age_groups.groupBy("education_level", "gender").agg(
        avg("income").alias("avg_income"),
        count("*").alias("count")
    ).orderBy(desc("avg_income"))
    # Relationship between marital status and income, dropping sparse groups.
    marital_income_relation = age_groups.groupBy("marital_status", "age_group").agg(
        avg("income").alias("avg_income"),
        count("*").alias("sample_size")
    ).filter(col("sample_size") > 50)
    # Three-way cross analysis of gender, education level and marital status.
    cross_analysis = age_groups.groupBy("gender", "education_level", "marital_status").agg(
        avg("income").alias("avg_income"),
        count("*").alias("group_size")
    ).filter(col("group_size") > 20).orderBy(desc("avg_income"))
    # Spark Row objects are not JSON serializable, so convert them to plain dicts.
    result_data = {
        'gender_income': [row.asDict() for row in gender_income_analysis.collect()],
        'education_distribution': [row.asDict() for row in education_distribution.collect()],
        'marital_income': [row.asDict() for row in marital_income_relation.collect()],
        'cross_analysis': [row.asDict() for row in cross_analysis.collect()]
    }
    return JsonResponse({'status': 'success', 'data': result_data})
def work_income_analysis(request):
    # Fetch job-related attributes together with income from MySQL.
    connection = mysql.connector.connect(host='localhost', database='population_db',
                                         user='root', password='password')
    cursor = connection.cursor()
    cursor.execute("SELECT occupation, industry, work_years, work_hours_per_week, income, age, education_level "
                   "FROM population_data WHERE income > 0 AND work_years >= 0")
    raw_data = cursor.fetchall()
    cursor.close()
    connection.close()
    schema = StructType([
        StructField("occupation", StringType(), True),
        StructField("industry", StringType(), True),
        StructField("work_years", IntegerType(), True),
        StructField("work_hours_per_week", IntegerType(), True),
        StructField("income", DoubleType(), True),
        StructField("age", IntegerType(), True),
        StructField("education_level", StringType(), True)
    ])
    df = spark.createDataFrame(raw_data, schema)
    # Restrict to plausible ranges of experience and weekly working hours.
    df = df.filter(col("work_years").between(0, 40) & col("work_hours_per_week").between(20, 80))
    # Rank occupations by average income, keeping only occupations with enough samples.
    occupation_income_ranking = df.groupBy("occupation").agg(
        avg("income").alias("avg_income"),
        count("*").alias("worker_count"),
        avg("work_years").alias("avg_experience"),
        avg("work_hours_per_week").alias("avg_weekly_hours")
    ).filter(col("worker_count") > 100).orderBy(desc("avg_income"))
    # Industry-level income and workforce profile.
    industry_analysis = df.groupBy("industry").agg(
        avg("income").alias("avg_industry_income"),
        count("*").alias("industry_size"),
        avg("age").alias("avg_worker_age")
    ).filter(col("industry_size") > 50).orderBy(desc("avg_industry_income"))
    # How income grows with experience, broken down by education level.
    experience_income_correlation = df.withColumn("experience_group",
        when(col("work_years") < 2, "新手(0-2年)")
        .when(col("work_years") < 5, "初级(2-5年)")
        .when(col("work_years") < 10, "中级(5-10年)")
        .when(col("work_years") < 20, "高级(10-20年)")
        .otherwise("资深(20年以上)")
    ).groupBy("experience_group", "education_level").agg(
        avg("income").alias("avg_income"),
        count("*").alias("sample_count")
    ).filter(col("sample_count") > 30)
    # Income by workload category (part-time / standard hours / intensive) and occupation.
    workload_income_analysis = df.withColumn("workload_category",
        when(col("work_hours_per_week") < 40, "兼职")
        .when(col("work_hours_per_week") < 50, "标准工时")
        .otherwise("高强度工作")
    ).groupBy("workload_category", "occupation").agg(
        avg("income").alias("avg_income"),
        avg("work_hours_per_week").alias("avg_hours"),
        count("*").alias("group_size")
    ).filter(col("group_size") > 25)
    # Combined occupation / industry / education view of income.
    comprehensive_work_analysis = df.groupBy("occupation", "industry", "education_level").agg(
        avg("income").alias("comprehensive_avg_income"),
        count("*").alias("total_samples"),
        avg("work_years").alias("avg_experience"),
        sum("income").alias("total_income_sum")
    ).filter(col("total_samples") > 15).orderBy(desc("comprehensive_avg_income"))
    # Convert Row objects to dicts so JsonResponse can serialize them.
    result_data = {
        'occupation_ranking': [row.asDict() for row in occupation_income_ranking.collect()],
        'industry_analysis': [row.asDict() for row in industry_analysis.collect()],
        'experience_correlation': [row.asDict() for row in experience_income_correlation.collect()],
        'workload_analysis': [row.asDict() for row in workload_income_analysis.collect()],
        'comprehensive_analysis': [row.asDict() for row in comprehensive_work_analysis.collect()]
    }
    return JsonResponse({'status': 'success', 'data': result_data})
def education_return_analysis(request):
    # Load education, income and job attributes for the returns-to-education analysis.
    connection = mysql.connector.connect(host='localhost', database='population_db',
                                         user='root', password='password')
    cursor = connection.cursor()
    cursor.execute("SELECT education_level, income, age, work_years, occupation, industry, gender "
                   "FROM population_data WHERE income > 0 AND education_level IS NOT NULL")
    raw_data = cursor.fetchall()
    cursor.close()
    connection.close()
    schema = StructType([
        StructField("education_level", StringType(), True),
        StructField("income", DoubleType(), True),
        StructField("age", IntegerType(), True),
        StructField("work_years", IntegerType(), True),
        StructField("occupation", StringType(), True),
        StructField("industry", StringType(), True),
        StructField("gender", StringType(), True)
    ])
    df = spark.createDataFrame(raw_data, schema)
    # Focus on people who have likely completed schooling and are still of working age.
    df = df.filter(col("age").between(22, 60) & col("work_years").between(0, 35))
    # Average income, age and experience per education level.
    education_income_hierarchy = df.groupBy("education_level").agg(
        avg("income").alias("avg_income"),
        count("*").alias("sample_size"),
        (sum("income") / count("*")).alias("calculated_avg"),
        avg("age").alias("avg_age"),
        avg("work_years").alias("avg_work_experience")
    ).filter(col("sample_size") > 80).orderBy(desc("avg_income"))
    # Map education levels to an ordinal rank for the premium calculation.
    education_premium_calculation = df.withColumn("education_rank",
        when(col("education_level") == "小学", 1)
        .when(col("education_level") == "初中", 2)
        .when(col("education_level") == "高中", 3)
        .when(col("education_level") == "专科", 4)
        .when(col("education_level") == "本科", 5)
        .when(col("education_level") == "研究生", 6)
        .otherwise(0)
    ).filter(col("education_rank") > 0)
    # Gender gap in income within each education level.
    gender_education_gap = education_premium_calculation.groupBy("education_level", "gender").agg(
        avg("income").alias("avg_income_by_gender"),
        count("*").alias("gender_sample_size")
    ).filter(col("gender_sample_size") > 40)
    # Interaction between age group and education level.
    age_education_interaction = df.withColumn("age_group",
        when(col("age") < 30, "青年(22-30)")
        .when(col("age") < 40, "中年(30-40)")
        .when(col("age") < 50, "中老年(40-50)")
        .otherwise("资深(50-60)")
    ).groupBy("education_level", "age_group").agg(
        avg("income").alias("age_edu_income"),
        count("*").alias("age_edu_count")
    ).filter(col("age_edu_count") > 25)
    # How each education level pays within each occupation.
    occupation_education_matching = df.groupBy("education_level", "occupation").agg(
        avg("income").alias("occupation_edu_income"),
        count("*").alias("matching_count"),
        avg("work_years").alias("avg_experience_in_role")
    ).filter(col("matching_count") > 20).orderBy(desc("occupation_edu_income"))
    # Education premium per industry.
    industry_education_premium = df.groupBy("education_level", "industry").agg(
        avg("income").alias("industry_edu_income"),
        count("*").alias("industry_edu_sample"),
        sum("income").alias("total_industry_income")
    ).filter(col("industry_edu_sample") > 30)
    # Rough return-on-education estimate: average income scaled by average career length.
    education_roi_analysis = education_premium_calculation.groupBy("education_level").agg(
        avg("income").alias("lifetime_avg_income"),
        count("*").alias("total_graduates"),
        (avg("income") * avg("work_years")).alias("cumulative_income_estimate")
    ).orderBy(desc("lifetime_avg_income"))
    # Convert Row objects to dicts so JsonResponse can serialize them.
    result_data = {
        'education_hierarchy': [row.asDict() for row in education_income_hierarchy.collect()],
        'gender_education_gap': [row.asDict() for row in gender_education_gap.collect()],
        'age_education_interaction': [row.asDict() for row in age_education_interaction.collect()],
        'occupation_education_matching': [row.asDict() for row in occupation_education_matching.collect()],
        'industry_education_premium': [row.asDict() for row in industry_education_premium.collect()],
        'education_roi': [row.asDict() for row in education_roi_analysis.collect()]
    }
    return JsonResponse({'status': 'success', 'data': result_data})
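The introduction mentions HDFS for large-scale storage, while the views above read their source rows from MySQL. The following is only a minimal sketch of how the cleaned census DataFrame could additionally be persisted to, and later re-read from, HDFS as Parquet; the namenode address and the path /population/cleaned_income are assumptions, not paths from the original project.
# Hedged sketch: persisting the cleaned census data to HDFS as Parquet.
# The HDFS URL and path are assumptions and must be adapted to the actual cluster.
def save_cleaned_data_to_hdfs(df):
    # Write the Spark DataFrame as Parquet files on HDFS, replacing any previous run.
    df.write.mode("overwrite").parquet("hdfs://namenode:9000/population/cleaned_income")

def load_cleaned_data_from_hdfs():
    # Later analyses can read the Parquet files back instead of querying MySQL again.
    return spark.read.parquet("hdfs://namenode:9000/population/cleaned_income")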
Documentation Showcase of the Big Data-Based Population Census Income Data Analysis System