【大数据】计算机岗位招聘数据可视化分析系统 计算机毕业设计项目 Hadoop+Spark环境配置 数据科学与大数据技术 附源码+文档+讲解

27 阅读 · 4 分钟

一、个人简介

💖💖作者:计算机编程果茶熊 💙💙个人简介:曾长期从事计算机专业培训教学,担任过编程老师,同时本人也热爱上课教学,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 计算机毕业设计选题 💕💕文末获取源码联系计算机编程果茶熊

二、系统介绍

大数据框架:Hadoop+Spark(Hive需要定制修改) 开发语言:Java+Python(两个版本都支持) 数据库:MySQL 后端框架:SpringBoot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持) 前端:Vue+Echarts+HTML+CSS+JavaScript+jQuery

三、视频解说

计算机岗位招聘数据可视化分析系统

四、部分功能展示

在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述

五、部分代码展示


from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, sum, when, desc, asc
from pyspark.sql.types import IntegerType, FloatType
import pandas as pd
import numpy as np
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json

spark = SparkSession.builder.appName("JobAnalysisSystem").config("spark.sql.adaptive.enabled", "true").config("spark.sql.adaptive.coalescePartitions.enabled", "true").getOrCreate()

@csrf_exempt
def position_dimension_analysis(request):
    """Aggregate job postings along the position dimension and return JSON.

    For each position name this computes: posting count and average
    min/max salary, per-skill demand, education-requirement counts,
    experience-level distribution, company-scale mix, monthly posting
    trend, and salary-band (低薪/中薪/高薪) distribution.

    Args:
        request: Django HTTP request (unused beyond routing).

    Returns:
        JsonResponse whose payload maps each analysis name to a list of
        row dicts collected from the corresponding Spark aggregation.
    """
    # NOTE(review): JDBC credentials are hard-coded; move them to Django
    # settings / environment variables before production use.
    job_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/job_analysis")
        .option("dbtable", "job_positions")
        .option("user", "root")
        .option("password", "password")
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .load()
    )

    def _rows(df):
        # Materialize an (already aggregated, small) DataFrame as
        # JSON-serializable dicts.
        return [row.asDict() for row in df.collect()]

    position_stats = (
        job_df.groupBy("position_name")
        .agg(
            count("*").alias("job_count"),
            avg("salary_min").alias("avg_salary_min"),
            avg("salary_max").alias("avg_salary_max"),
        )
        .orderBy(desc("job_count"))
    )
    # Explode the comma-separated skill string into (position, skill) pairs.
    # BUGFIX: filter NULL required_skills first — the original flatMap
    # called .split() on None and crashed on rows with a missing value.
    skill_requirements = (
        job_df.filter(col("required_skills").isNotNull())
        .select("position_name", "required_skills")
        .rdd.flatMap(
            lambda row: [
                (row[0], skill.strip())
                for skill in row[1].split(",")
                if skill.strip()
            ]
        )
        .toDF(["position", "skill"])
    )
    skill_stats = (
        skill_requirements.groupBy("position", "skill")
        .count()
        .orderBy("position", desc("count"))
    )
    education_stats = (
        job_df.groupBy("position_name", "education_requirement")
        .count()
        .orderBy("position_name", desc("count"))
    )
    # Bucket years of experience into coarse seniority levels.
    experience_distribution = (
        job_df.withColumn(
            "experience_level",
            when(col("work_experience") <= 1, "新手")
            .when(col("work_experience") <= 3, "初级")
            .when(col("work_experience") <= 5, "中级")
            .otherwise("高级"),
        )
        .groupBy("position_name", "experience_level")
        .count()
    )
    company_scale_analysis = (
        job_df.groupBy("position_name", "company_scale")
        .count()
        .orderBy("position_name", desc("count"))
    )
    position_growth_trend = (
        job_df.groupBy("position_name", "publish_month")
        .count()
        .orderBy("position_name", "publish_month")
    )
    # Band by the midpoint of the posted salary range.
    salary_range_analysis = (
        job_df.withColumn(
            "salary_range",
            when((col("salary_min") + col("salary_max")) / 2 < 8000, "低薪")
            .when((col("salary_min") + col("salary_max")) / 2 < 15000, "中薪")
            .otherwise("高薪"),
        )
        .groupBy("position_name", "salary_range")
        .count()
    )
    result_data = {
        "position_stats": _rows(position_stats),
        "skill_stats": _rows(skill_stats),
        "education_stats": _rows(education_stats),
        "experience_distribution": _rows(experience_distribution),
        "company_scale_analysis": _rows(company_scale_analysis),
        "position_growth_trend": _rows(position_growth_trend),
        "salary_range_analysis": _rows(salary_range_analysis),
    }
    return JsonResponse(result_data, safe=False)

@csrf_exempt
def salary_experience_analysis(request):
    """Analyze how salary relates to work experience and return JSON.

    Produces per-experience salary statistics, experience-range buckets,
    position/education/company-scale/industry breakdowns, and the
    experience distribution of high-salary (>= 20000 midpoint) postings.
    """
    job_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/job_analysis")
        .option("dbtable", "job_positions")
        .option("user", "root")
        .option("password", "password")
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .load()
    )

    # Midpoint of the posted salary range, reused by every aggregation below.
    mid_salary = (col("salary_min") + col("salary_max")) / 2

    def _dump(df):
        # Collect an aggregated DataFrame into JSON-serializable dicts.
        return [r.asDict() for r in df.collect()]

    with_mid = (
        job_df.select("work_experience", "salary_min", "salary_max")
        .withColumn("avg_salary", mid_salary)
    )
    experience_salary_stats = (
        with_mid.groupBy("work_experience")
        .agg(avg("avg_salary").alias("mean_salary"), count("*").alias("job_count"))
        .orderBy("work_experience")
    )
    # Coarse experience buckets (应届生 through 10年以上).
    bucketed = job_df.withColumn(
        "exp_range",
        when(col("work_experience") == 0, "应届生")
        .when(col("work_experience") <= 2, "1-2年")
        .when(col("work_experience") <= 5, "3-5年")
        .when(col("work_experience") <= 10, "6-10年")
        .otherwise("10年以上"),
    )
    range_salary_analysis = (
        bucketed.groupBy("exp_range")
        .agg(avg(mid_salary).alias("avg_salary"), count("*").alias("position_count"))
        .orderBy("avg_salary")
    )
    # Per-position breakdown; drop thinly-populated cells (< 5 postings).
    position_exp_salary = (
        job_df.groupBy("position_name", "work_experience")
        .agg(avg(mid_salary).alias("avg_salary"), count("*").alias("count"))
        .filter(col("count") >= 5)
    )
    salary_growth_analysis = (
        job_df.withColumn("salary_mid", mid_salary)
        .groupBy("work_experience")
        .agg(avg("salary_mid").alias("avg_salary"))
        .orderBy("work_experience")
    )
    education_salary_impact = (
        job_df.groupBy("education_requirement", "work_experience")
        .agg(avg(mid_salary).alias("avg_salary"))
        .orderBy("education_requirement", "work_experience")
    )
    company_size_salary_exp = (
        job_df.groupBy("company_scale", "work_experience")
        .agg(avg(mid_salary).alias("avg_salary"), count("*").alias("sample_size"))
        .filter(col("sample_size") >= 3)
    )
    # Which experience levels dominate high-paying (midpoint >= 20000) jobs.
    high_salary_experience_threshold = (
        job_df.withColumn("salary_avg", mid_salary)
        .filter(col("salary_avg") >= 20000)
        .groupBy("work_experience")
        .count()
        .orderBy(desc("count"))
    )
    industry_exp_salary = (
        job_df.groupBy("industry_type", "work_experience")
        .agg(avg(mid_salary).alias("avg_salary"), count("*").alias("job_count"))
        .filter(col("job_count") >= 5)
    )
    payload = {
        "experience_salary_stats": _dump(experience_salary_stats),
        "range_salary_analysis": _dump(range_salary_analysis),
        "position_exp_salary": _dump(position_exp_salary),
        "salary_growth_analysis": _dump(salary_growth_analysis),
        "education_salary_impact": _dump(education_salary_impact),
        "company_size_salary_exp": _dump(company_size_salary_exp),
        "high_salary_experience": _dump(high_salary_experience_threshold),
        "industry_exp_salary": _dump(industry_exp_salary),
    }
    return JsonResponse(payload, safe=False)

@csrf_exempt
def regional_company_analysis(request):
    """Analyze job postings by region and company attributes; return JSON.

    Covers city/province posting volumes and salaries, per-city position,
    company-scale, company-name, education and experience breakdowns,
    industry distribution by province, a salary-per-opportunity ratio,
    non-first-tier "emerging" cities, and company-type mix by city.
    """
    job_df = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://localhost:3306/job_analysis")
        .option("dbtable", "job_positions")
        .option("user", "root")
        .option("password", "password")
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .load()
    )

    # Midpoint of the posted salary range, shared by the aggregations below.
    mid_salary = (col("salary_min") + col("salary_max")) / 2

    def _dump(df):
        # Collect an aggregated DataFrame into JSON-serializable dicts.
        return [r.asDict() for r in df.collect()]

    regional_distribution = (
        job_df.groupBy("city", "province")
        .agg(count("*").alias("job_count"), avg(mid_salary).alias("avg_salary"))
        .orderBy(desc("job_count"))
    )
    city_position_analysis = (
        job_df.groupBy("city", "position_name").count().orderBy("city", desc("count"))
    )
    company_scale_distribution = (
        job_df.groupBy("city", "company_scale").count().orderBy("city", desc("count"))
    )
    regional_salary_comparison = (
        job_df.groupBy("city")
        .agg(
            avg("salary_min").alias("avg_min_salary"),
            avg("salary_max").alias("avg_max_salary"),
            avg(mid_salary).alias("avg_salary"),
        )
        .orderBy(desc("avg_salary"))
    )
    top_companies_by_region = (
        job_df.groupBy("city", "company_name").count().orderBy("city", desc("count"))
    )
    industry_regional_distribution = (
        job_df.groupBy("province", "industry_type")
        .count()
        .orderBy("province", desc("count"))
    )
    # Ratio of average salary to opportunity count per city (as defined by
    # the original implementation — higher means fewer, better-paid posts).
    cost_effectiveness_analysis = (
        job_df.withColumn("salary_avg", mid_salary)
        .groupBy("city")
        .agg(avg("salary_avg").alias("avg_salary"), count("*").alias("opportunity_count"))
        .withColumn("cost_effectiveness", col("avg_salary") / col("opportunity_count"))
        .orderBy(desc("cost_effectiveness"))
    )
    regional_experience_requirements = (
        job_df.groupBy("city", "work_experience").count().orderBy("city", "work_experience")
    )
    education_regional_preference = (
        job_df.groupBy("city", "education_requirement")
        .count()
        .orderBy("city", desc("count"))
    )
    # Cities outside the four first-tier ones, with at least 50 postings.
    emerging_cities_analysis = (
        job_df.filter(~col("city").isin(["北京", "上海", "广州", "深圳"]))
        .groupBy("city")
        .agg(count("*").alias("job_count"), avg(mid_salary).alias("avg_salary"))
        .filter(col("job_count") >= 50)
        .orderBy(desc("avg_salary"))
    )
    company_type_regional = (
        job_df.groupBy("city", "company_type").count().orderBy("city", desc("count"))
    )
    payload = {
        "regional_distribution": _dump(regional_distribution),
        "city_position_analysis": _dump(city_position_analysis),
        "company_scale_distribution": _dump(company_scale_distribution),
        "regional_salary_comparison": _dump(regional_salary_comparison),
        "top_companies_by_region": _dump(top_companies_by_region),
        "industry_regional_distribution": _dump(industry_regional_distribution),
        "cost_effectiveness_analysis": _dump(cost_effectiveness_analysis),
        "regional_experience_requirements": _dump(regional_experience_requirements),
        "education_regional_preference": _dump(education_regional_preference),
        "emerging_cities_analysis": _dump(emerging_cities_analysis),
        "company_type_regional": _dump(company_type_regional),
    }
    return JsonResponse(payload, safe=False)

六、部分文档展示

在这里插入图片描述

七、END

💕💕文末获取源码联系计算机编程果茶熊