Preface
- 💖💖 Author: 计算机程序员小杨
- 💙💙 About me: I work in a computer-related field and specialize in Java, WeChat Mini Programs, Python, Golang, and Android, among other IT directions. I take on custom project development, code walkthroughs, thesis-defense coaching, and document writing, and I also know a few techniques for reducing plagiarism-check similarity. I love technology, enjoy digging into new tools and frameworks, and like solving real problems with code. If you have any questions about code or technology, feel free to ask!
- 💛💛 A word of thanks: thank you all for your attention and support!
- 💕💕 To get the source code, contact 计算机程序员小杨 at the end of this article.
- 💜💜
- Web application projects
- Android / Mini Program projects
- Big data projects
- Deep learning projects
- Computer science graduation project topics
- 💜💜
1. Development Tools Overview
- Backend language: Java
- Backend framework: Spring Boot (Spring + Spring MVC + MyBatis; see the wiring sketch after this list)
- Frontend: WeChat Mini Program
- Database: MySQL
- System architecture: C/S (client/server)
- Development tool: WeChat Developer Tools
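As a reading aid, here is a minimal sketch of how a Spring Boot application typically wires these SSM pieces together, assuming the mybatis-spring-boot-starter dependency; the package name com.example.waste.mapper is an illustrative placeholder and not taken from this project.

```java
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Hypothetical entry point; the mapper package below is a placeholder.
@SpringBootApplication
@MapperScan("com.example.waste.mapper") // registers MyBatis mapper interfaces as Spring beans
public class WasteClassificationApplication {
    public static void main(String[] args) {
        SpringApplication.run(WasteClassificationApplication.class, args);
    }
}
```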
2. System Overview
The Waste Classification Information System is an environmental information platform built on a WeChat Mini Program. The backend is written in Java on Spring Boot, following the classic Spring + Spring MVC + MyBatis (SSM) architecture, with MySQL providing persistent storage. The system offers six core modules: user account management, waste type management, classification information maintenance, disposal standard management, user feedback handling, and system operations management. Through the Mini Program client, users can look up the correct category and disposal requirements for any kind of waste at any time, while administrators update and maintain the classification data through the backend. The client/server design keeps data transfer stable and secure, and building on the WeChat ecosystem lowers the barrier to entry, helping spread waste-sorting knowledge and raise user participation.
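To make the query flow concrete, below is a minimal controller sketch showing how the Mini Program client could reach the classification search service shown in section 5. This is an illustrative assumption rather than code from the project: the class name and the /api/classification/search route are hypothetical, while WasteClassificationService, ClassificationSearchDTO, and ResultVO all appear in the source listing below.

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical controller: route and class name are assumptions for illustration.
@RestController
@RequestMapping("/api/classification")
public class ClassificationController {

    @Autowired
    private WasteClassificationService wasteClassificationService;

    // The Mini Program would call this endpoint (e.g. via wx.request) to look
    // up how an item should be sorted.
    @PostMapping("/search")
    public ResultVO search(@RequestBody ClassificationSearchDTO searchDTO) {
        return wasteClassificationService.searchClassificationInfo(searchDTO);
    }
}
```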
3. System Feature Demonstration
4. System Interface Showcase
5. System Source Code
```java
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class WasteClassificationService {

    // A single local SparkSession, shared by the analytics queries below.
    private final SparkSession spark = SparkSession.builder()
            .appName("WasteClassification")
            .master("local[*]")
            .getOrCreate();

    @Autowired
    private UserMapper userMapper;
    @Autowired
    private WasteTypeMapper wasteTypeMapper;
    @Autowired
    private ClassificationInfoMapper classificationInfoMapper;

    // Loads a MySQL table into Spark. The connection settings are hardcoded
    // here; in production they should come from external configuration.
    private Dataset<Row> readTable(String table) {
        return spark.read().format("jdbc")
                .option("url", "jdbc:mysql://localhost:3306/waste_db")
                .option("dbtable", table)
                .option("user", "root")
                .option("password", "123456")
                .load();
    }

    // Registers a WeChat user keyed by openId, then prints a breakdown of
    // users per user_type via Spark SQL.
    public ResultVO registerUser(UserRegisterDTO userDTO) {
        User existUser = userMapper.selectByOpenId(userDTO.getOpenId());
        if (existUser != null) {
            return ResultVO.fail("User already exists");
        }
        User newUser = new User();
        newUser.setOpenId(userDTO.getOpenId());
        newUser.setNickname(userDTO.getNickname());
        newUser.setAvatar(userDTO.getAvatar());
        newUser.setPhone(userDTO.getPhone());
        newUser.setCreateTime(new Date());
        newUser.setUpdateTime(new Date());
        newUser.setStatus(1);    // 1 = active
        newUser.setUserType(0);  // 0 = ordinary user
        int result = userMapper.insertSelective(newUser);
        if (result > 0) {
            Dataset<Row> userDataset = readTable("t_user");
            userDataset.createOrReplaceTempView("users");
            Dataset<Row> analysisResult = spark.sql(
                    "SELECT user_type, COUNT(*) as count FROM users GROUP BY user_type");
            analysisResult.show();
            return ResultVO.success("Registration successful", newUser);
        } else {
            return ResultVO.fail("Registration failed");
        }
    }

    // Adds a waste type if the name is not already taken, then prints a
    // usage report over active types.
    public ResultVO addWasteType(WasteTypeDTO wasteTypeDTO) {
        WasteType existType = wasteTypeMapper.selectByTypeName(wasteTypeDTO.getTypeName());
        if (existType != null) {
            return ResultVO.fail("Waste type already exists");
        }
        WasteType wasteType = new WasteType();
        wasteType.setTypeName(wasteTypeDTO.getTypeName());
        wasteType.setTypeCode(wasteTypeDTO.getTypeCode());
        wasteType.setTypeColor(wasteTypeDTO.getTypeColor());
        wasteType.setTypeIcon(wasteTypeDTO.getTypeIcon());
        wasteType.setDescription(wasteTypeDTO.getDescription());
        wasteType.setCreateTime(new Date());
        wasteType.setUpdateTime(new Date());
        wasteType.setStatus(1);
        wasteType.setSortOrder(wasteTypeDTO.getSortOrder());
        int result = wasteTypeMapper.insertSelective(wasteType);
        if (result > 0) {
            Dataset<Row> wasteTypeDataset = readTable("t_waste_type");
            wasteTypeDataset.createOrReplaceTempView("waste_types");
            Dataset<Row> typeAnalysis = spark.sql(
                    "SELECT type_name, COUNT(*) as usage_count FROM waste_types"
                    + " WHERE status = 1 GROUP BY type_name ORDER BY usage_count DESC");
            typeAnalysis.show();
            List<WasteType> typeList = wasteTypeMapper.selectAll();
            return ResultVO.success("Added successfully", typeList);
        } else {
            return ResultVO.fail("Failed to add");
        }
    }

    // Searches classification entries by keyword and/or type id, falling back
    // to all active entries; each hit's search counter is incremented and its
    // waste type attached before returning.
    public ResultVO searchClassificationInfo(ClassificationSearchDTO searchDTO) {
        List<ClassificationInfo> infoList = new ArrayList<>();
        if (StringUtils.isNotEmpty(searchDTO.getKeyword())) {
            infoList = classificationInfoMapper.selectByKeyword(searchDTO.getKeyword());
        }
        if (searchDTO.getTypeId() != null) {
            // Keyword and type results are merged; an entry matching both
            // filters will appear twice in the list.
            infoList.addAll(classificationInfoMapper.selectByTypeId(searchDTO.getTypeId()));
        }
        if (infoList.isEmpty()) {
            infoList = classificationInfoMapper.selectAllActive();
        }
        Dataset<Row> classificationDataset = readTable("t_classification_info");
        classificationDataset.createOrReplaceTempView("classifications");
        Dataset<Row> searchAnalysis = spark.sql(
                "SELECT waste_name, search_count, type_id FROM classifications"
                + " WHERE status = 1 ORDER BY search_count DESC LIMIT 10");
        searchAnalysis.show();
        for (ClassificationInfo info : infoList) {
            info.setSearchCount(info.getSearchCount() + 1);
            classificationInfoMapper.updateSearchCount(info.getId(), info.getSearchCount());
            WasteType wasteType = wasteTypeMapper.selectByPrimaryKey(info.getTypeId());
            info.setWasteType(wasteType);
        }
        Map<String, Object> resultMap = new HashMap<>();
        resultMap.put("list", infoList);
        resultMap.put("total", infoList.size());
        return ResultVO.success("Query successful", resultMap);
    }
}
```
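The service above reports results through a ResultVO wrapper whose definition the post does not include. The sketch below reconstructs a minimal version from the only two calls visible in the listing, success(message, data) and fail(message); the numeric code field and its values are assumptions following a common convention.

```java
// Minimal reconstruction of the ResultVO wrapper used above (hypothetical:
// only success(message, data) and fail(message) are visible in the listing).
public class ResultVO {
    private final int code;       // assumed convention: 0 = success, 1 = failure
    private final String message;
    private final Object data;

    private ResultVO(int code, String message, Object data) {
        this.code = code;
        this.message = message;
        this.data = data;
    }

    public static ResultVO success(String message, Object data) {
        return new ResultVO(0, message, data);
    }

    public static ResultVO fail(String message) {
        return new ResultVO(1, message, null);
    }

    // Getters so that Spring MVC's Jackson serializer can render the fields.
    public int getCode() { return code; }
    public String getMessage() { return message; }
    public Object getData() { return data; }
}
```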
6. System Documentation
Conclusion
💕💕 To get the source code, contact 计算机程序员小杨.