💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目
[TOC]
基于大数据的北京二手房数据分析与可视化系统介绍
《基于大数据的北京二手房数据分析与可视化系统》是一套面向计算机专业毕业设计的完整大数据分析解决方案,系统采用Hadoop+Spark大数据处理框架作为核心技术架构,通过HDFS分布式文件系统存储海量二手房数据,利用Spark SQL进行高效的数据清洗、转换与分析处理。系统提供Python+Django和Java+SpringBoot两种后端技术实现方案,前端采用Vue+ElementUI构建现代化的交互界面,集成Echarts图表库实现数据的多维度可视化展示。系统功能模块包含完善的用户管理、个人信息管理、二手房数据管理等基础功能,核心亮点在于数据分析与可视化模块,通过数据大屏可视化全景展示北京二手房市场整体态势,宏观市场分析模块运用Pandas和NumPy对房价走势、区域分布、成交量等关键指标进行深度挖掘,户型面积分析模块从不同户型、面积区间维度揭示市场结构特征,建筑特征分析模块针对建筑年代、楼层、朝向等属性进行统计分析,房产价值分析模块则通过多维度数据交叉分析评估房产的投资价值与性价比。整个系统充分体现大数据技术在实际业务场景中的应用价值,从数据采集、存储、计算到可视化展示形成完整的技术链路,既满足毕业设计对技术深度和广度的要求,又具备实际应用价值,是计算机专业学生掌握Hadoop生态系统、Spark计算引擎以及数据可视化技术的优质毕设项目选择。
基于大数据的北京二手房数据分析与可视化系统演示视频
基于大数据的北京二手房数据分析与可视化系统演示图片
基于大数据的北京二手房数据分析与可视化系统代码展示
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.functions;
import org.springframework.web.bind.annotation.*;
import java.util.*;
@RestController
@RequestMapping("/house")
public class HouseAnalysisController {

    // NOTE(review): database credentials are hard-coded in source; move them to
    // externalized configuration (application.yml / environment variables)
    // before any real deployment.
    private static final String JDBC_URL = "jdbc:mysql://localhost:3306/housedb";
    private static final String JDBC_TABLE = "house_info";
    private static final String JDBC_USER = "root";
    private static final String JDBC_PASSWORD = "123456";

    // One long-lived session shared by every endpoint. "local[*]" is a
    // development setting — point master() at the cluster for production use.
    private final SparkSession spark = SparkSession.builder()
            .appName("BeijingHouseAnalysis")
            .master("local[*]")
            .config("spark.sql.warehouse.dir", "/user/hive/warehouse")
            .getOrCreate();

    /**
     * Loads the house_info table over JDBC and registers it under the given
     * temporary view name so the endpoint can query it with Spark SQL.
     * Previously this read was duplicated verbatim in all three endpoints.
     *
     * @param viewName name of the temp view to (re)create for this request
     */
    private void loadHouseView(String viewName) {
        Dataset<Row> houseData = spark.read().format("jdbc")
                .option("url", JDBC_URL)
                .option("dbtable", JDBC_TABLE)
                .option("user", JDBC_USER)
                .option("password", JDBC_PASSWORD)
                .load();
        houseData.createOrReplaceTempView(viewName);
    }

    /**
     * Reads a DOUBLE column, mapping SQL NULL to 0. AVG(...) yields NULL for
     * an empty table/group, and the original code only guarded some call
     * sites (the cross-analysis loop) — an empty dataset would have thrown
     * NullPointerException elsewhere. Applied consistently here.
     */
    private static double safeDouble(Row row, int index) {
        return row.isNullAt(index) ? 0 : row.getDouble(index);
    }

    /**
     * Macro market analysis: average price and listing count per district,
     * month-by-month price trend, and overall summary statistics.
     *
     * @return map with keys districtAnalysis, monthTrend, totalCount,
     *         overallAvgPrice, maxPrice, minPrice
     */
    @GetMapping("/macroMarketAnalysis")
    public Map<String, Object> macroMarketAnalysis() {
        loadHouseView("house_temp");
        Dataset<Row> priceByDistrict = spark.sql("SELECT district, AVG(price) as avg_price, COUNT(*) as house_count FROM house_temp GROUP BY district ORDER BY avg_price DESC");
        Dataset<Row> priceByMonth = spark.sql("SELECT DATE_FORMAT(create_time, 'yyyy-MM') as month, AVG(price) as avg_price FROM house_temp GROUP BY month ORDER BY month");
        Dataset<Row> totalStats = spark.sql("SELECT COUNT(*) as total_count, AVG(price) as overall_avg_price, MAX(price) as max_price, MIN(price) as min_price FROM house_temp");
        Map<String, Object> result = new HashMap<>();
        List<Map<String, Object>> districtList = new ArrayList<>();
        for (Row row : priceByDistrict.collectAsList()) {
            Map<String, Object> districtMap = new HashMap<>();
            districtMap.put("district", row.getString(0));
            districtMap.put("avgPrice", safeDouble(row, 1));
            districtMap.put("houseCount", row.getLong(2));
            districtList.add(districtMap);
        }
        List<Map<String, Object>> monthList = new ArrayList<>();
        for (Row row : priceByMonth.collectAsList()) {
            Map<String, Object> monthMap = new HashMap<>();
            monthMap.put("month", row.getString(0));
            monthMap.put("avgPrice", safeDouble(row, 1));
            monthList.add(monthMap);
        }
        // COUNT(*) is never NULL, but the AVG/MAX/MIN aggregates are NULL
        // when the table is empty — hence safeDouble.
        Row statsRow = totalStats.first();
        result.put("districtAnalysis", districtList);
        result.put("monthTrend", monthList);
        result.put("totalCount", statsRow.getLong(0));
        result.put("overallAvgPrice", safeDouble(statsRow, 1));
        result.put("maxPrice", safeDouble(statsRow, 2));
        result.put("minPrice", safeDouble(statsRow, 3));
        return result;
    }

    /**
     * House-type and floor-area analysis: distribution per house type,
     * distribution per area bucket, and a type x area-bucket price cross.
     *
     * @return map with keys typeAnalysis, areaRangeAnalysis,
     *         typeAreaCrossAnalysis
     */
    @GetMapping("/houseTypeAreaAnalysis")
    public Map<String, Object> houseTypeAreaAnalysis() {
        loadHouseView("house_area_temp");
        Dataset<Row> typeAnalysis = spark.sql("SELECT house_type, COUNT(*) as count, AVG(price) as avg_price, AVG(area) as avg_area FROM house_area_temp GROUP BY house_type ORDER BY count DESC");
        Dataset<Row> areaRangeAnalysis = spark.sql("SELECT CASE WHEN area < 50 THEN '50平米以下' WHEN area >= 50 AND area < 90 THEN '50-90平米' WHEN area >= 90 AND area < 120 THEN '90-120平米' WHEN area >= 120 AND area < 150 THEN '120-150平米' ELSE '150平米以上' END as area_range, COUNT(*) as count, AVG(price) as avg_price FROM house_area_temp GROUP BY area_range ORDER BY avg_price");
        Dataset<Row> typeAreaCross = spark.sql("SELECT house_type, AVG(CASE WHEN area < 90 THEN price ELSE NULL END) as small_avg_price, AVG(CASE WHEN area >= 90 AND area < 120 THEN price ELSE NULL END) as medium_avg_price, AVG(CASE WHEN area >= 120 THEN price ELSE NULL END) as large_avg_price FROM house_area_temp GROUP BY house_type");
        Map<String, Object> result = new HashMap<>();
        List<Map<String, Object>> typeList = new ArrayList<>();
        for (Row row : typeAnalysis.collectAsList()) {
            Map<String, Object> typeMap = new HashMap<>();
            typeMap.put("houseType", row.getString(0));
            typeMap.put("count", row.getLong(1));
            typeMap.put("avgPrice", safeDouble(row, 2));
            typeMap.put("avgArea", safeDouble(row, 3));
            typeList.add(typeMap);
        }
        List<Map<String, Object>> areaList = new ArrayList<>();
        for (Row row : areaRangeAnalysis.collectAsList()) {
            Map<String, Object> areaMap = new HashMap<>();
            areaMap.put("areaRange", row.getString(0));
            areaMap.put("count", row.getLong(1));
            areaMap.put("avgPrice", safeDouble(row, 2));
            areaList.add(areaMap);
        }
        List<Map<String, Object>> crossList = new ArrayList<>();
        for (Row row : typeAreaCross.collectAsList()) {
            // The conditional AVGs are NULL for types with no houses in a
            // given size bucket; report them as 0 (original behavior here).
            Map<String, Object> crossMap = new HashMap<>();
            crossMap.put("houseType", row.getString(0));
            crossMap.put("smallAvgPrice", safeDouble(row, 1));
            crossMap.put("mediumAvgPrice", safeDouble(row, 2));
            crossMap.put("largeAvgPrice", safeDouble(row, 3));
            crossList.add(crossMap);
        }
        result.put("typeAnalysis", typeList);
        result.put("areaRangeAnalysis", areaList);
        result.put("typeAreaCrossAnalysis", crossList);
        return result;
    }

    /**
     * Building-feature analysis: statistics grouped by construction-year
     * bucket, floor level, orientation, elevator availability and decoration.
     *
     * @return map with keys buildYearAnalysis, floorAnalysis,
     *         orientationAnalysis, elevatorAnalysis, decorationAnalysis
     */
    @GetMapping("/buildingFeatureAnalysis")
    public Map<String, Object> buildingFeatureAnalysis() {
        loadHouseView("building_temp");
        Dataset<Row> buildYearAnalysis = spark.sql("SELECT CASE WHEN build_year < 2000 THEN '2000年前' WHEN build_year >= 2000 AND build_year < 2010 THEN '2000-2010年' WHEN build_year >= 2010 AND build_year < 2015 THEN '2010-2015年' ELSE '2015年后' END as year_range, COUNT(*) as count, AVG(price) as avg_price FROM building_temp GROUP BY year_range ORDER BY avg_price DESC");
        Dataset<Row> floorAnalysis = spark.sql("SELECT floor_level, COUNT(*) as count, AVG(price) as avg_price FROM building_temp GROUP BY floor_level ORDER BY count DESC");
        Dataset<Row> orientationAnalysis = spark.sql("SELECT orientation, COUNT(*) as count, AVG(price) as avg_price FROM building_temp GROUP BY orientation ORDER BY avg_price DESC");
        Dataset<Row> elevatorAnalysis = spark.sql("SELECT has_elevator, COUNT(*) as count, AVG(price) as avg_price, AVG(area) as avg_area FROM building_temp GROUP BY has_elevator");
        Dataset<Row> decorationAnalysis = spark.sql("SELECT decoration, COUNT(*) as count, AVG(price) as avg_price FROM building_temp GROUP BY decoration ORDER BY count DESC");
        Map<String, Object> result = new HashMap<>();
        List<Map<String, Object>> yearList = new ArrayList<>();
        for (Row row : buildYearAnalysis.collectAsList()) {
            Map<String, Object> yearMap = new HashMap<>();
            yearMap.put("yearRange", row.getString(0));
            yearMap.put("count", row.getLong(1));
            yearMap.put("avgPrice", safeDouble(row, 2));
            yearList.add(yearMap);
        }
        List<Map<String, Object>> floorList = new ArrayList<>();
        for (Row row : floorAnalysis.collectAsList()) {
            Map<String, Object> floorMap = new HashMap<>();
            floorMap.put("floorLevel", row.getString(0));
            floorMap.put("count", row.getLong(1));
            floorMap.put("avgPrice", safeDouble(row, 2));
            floorList.add(floorMap);
        }
        List<Map<String, Object>> orientationList = new ArrayList<>();
        for (Row row : orientationAnalysis.collectAsList()) {
            Map<String, Object> orientationMap = new HashMap<>();
            orientationMap.put("orientation", row.getString(0));
            orientationMap.put("count", row.getLong(1));
            orientationMap.put("avgPrice", safeDouble(row, 2));
            orientationList.add(orientationMap);
        }
        List<Map<String, Object>> elevatorList = new ArrayList<>();
        for (Row row : elevatorAnalysis.collectAsList()) {
            Map<String, Object> elevatorMap = new HashMap<>();
            // assumes has_elevator is an INT flag where 1 = present — TODO
            // confirm against the house_info schema.
            elevatorMap.put("hasElevator", row.getInt(0) == 1 ? "有电梯" : "无电梯");
            elevatorMap.put("count", row.getLong(1));
            elevatorMap.put("avgPrice", safeDouble(row, 2));
            elevatorMap.put("avgArea", safeDouble(row, 3));
            elevatorList.add(elevatorMap);
        }
        List<Map<String, Object>> decorationList = new ArrayList<>();
        for (Row row : decorationAnalysis.collectAsList()) {
            Map<String, Object> decorationMap = new HashMap<>();
            decorationMap.put("decoration", row.getString(0));
            decorationMap.put("count", row.getLong(1));
            decorationMap.put("avgPrice", safeDouble(row, 2));
            decorationList.add(decorationMap);
        }
        result.put("buildYearAnalysis", yearList);
        result.put("floorAnalysis", floorList);
        result.put("orientationAnalysis", orientationList);
        result.put("elevatorAnalysis", elevatorList);
        result.put("decorationAnalysis", decorationList);
        return result;
    }
}
基于大数据的北京二手房数据分析与可视化系统文档展示
💖💖作者:计算机毕业设计小途 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目