前言
💖💖作者:计算机程序员小杨 💙💙个人简介:我是一名计算机相关专业的从业者,擅长Java、微信小程序、Python、Golang、安卓Android等多个IT方向。会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。热爱技术,喜欢钻研新工具和框架,也乐于通过代码解决实际问题,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💕💕文末获取源码联系 计算机程序员小杨 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题 💜💜
一.开发工具简介
开发语言:Java+Python(两个版本都支持) 后端框架:Spring Boot(Spring+SpringMVC+Mybatis)+Django(两个版本都支持) 前端:Vue+ElementUI+HTML 数据库:MySQL 系统架构:B/S 开发工具:IDEA(Java的)或者PyCharm(Python的)
二.系统内容简介
农业信息管理系统是一款基于Spring Boot和Vue技术栈开发的综合性农业生产管理平台,采用B/S架构实现跨平台访问。系统通过MySQL数据库存储和管理农业生产全流程数据,涵盖农户信息登记、作物类型分类、农事活动规划、土壤环境监测、气象数据采集、种植过程记录、农资产品管理、采购使用追踪、市场行情分析以及农业知识库构建等核心功能模块。平台运用MyBatis持久层框架实现数据访问,通过SpringMVC处理前后端交互,Vue框架构建响应式用户界面,为农业生产经营者提供从种植规划到市场销售的全链条信息化管理工具,帮助用户实现生产数据数字化存储、农事活动科学规划、农资使用精准管理、市场信息及时获取,提升农业生产管理效率和决策科学性。
三.系统功能演示
四.系统界面展示
五.系统源码展示
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.springframework.stereotype.Service;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.*;
@Service
public class AgriculturalCoreService {
    @Autowired
    private PlantingRecordMapper plantingRecordMapper;
    @Autowired
    private AgriculturalMaterialMapper materialMapper;
    @Autowired
    private MarketInfoMapper marketInfoMapper;

    // Single local-mode Spark session shared by all analysis methods. Mapper results
    // are registered as temp views so aggregations can be expressed in Spark SQL.
    private SparkSession spark = SparkSession.builder()
            .appName("AgriculturalDataAnalysis")
            .master("local[*]")
            .getOrCreate();

    /**
     * Aggregates a farmer's planting records for one crop over a date range.
     *
     * @param farmerId  owner of the records
     * @param cropType  crop to filter on
     * @param startDate inclusive range start (legacy java.util.Date, kept for caller compatibility)
     * @param endDate   inclusive range end
     * @return map with Spark per-crop yield/cost aggregates, overall totals, a
     *         "profitMargin" ratio, a month-keyed yield trend and a per-record risk list
     */
    public Map<String, Object> analyzePlantingRecordWithBigData(Long farmerId, String cropType, Date startDate, Date endDate) {
        List<PlantingRecord> records = plantingRecordMapper.selectByFarmerAndCropAndDateRange(farmerId, cropType, startDate, endDate);
        Dataset<Row> recordDataset = spark.createDataFrame(records, PlantingRecord.class);
        recordDataset.createOrReplaceTempView("planting_records");
        Dataset<Row> yieldAnalysis = spark.sql("SELECT crop_type, AVG(yield_amount) as avg_yield, SUM(planting_area) as total_area, COUNT(*) as record_count FROM planting_records GROUP BY crop_type");
        Dataset<Row> costAnalysis = spark.sql("SELECT crop_type, AVG(total_cost) as avg_cost, SUM(total_cost) as total_cost FROM planting_records GROUP BY crop_type");
        Map<String, Object> result = new HashMap<>();
        result.put("yieldAnalysis", yieldAnalysis.collectAsList());
        result.put("costAnalysis", costAnalysis.collectAsList());
        double totalYield = records.stream().mapToDouble(PlantingRecord::getYieldAmount).sum();
        double totalCost = records.stream().mapToDouble(PlantingRecord::getTotalCost).sum();
        // NOTE(review): this ratio mixes yield quantity and monetary cost; preserved as-is
        // because callers may depend on it, but the units should be confirmed upstream.
        double profitMargin = totalYield > 0 ? ((totalYield - totalCost) / totalYield) * 100 : 0;
        result.put("totalYield", totalYield);
        result.put("totalCost", totalCost);
        result.put("profitMargin", profitMargin);
        // Hoisted out of the loop: SimpleDateFormat construction is costly and the
        // instance is confined to this method, so single-threaded use is safe.
        java.text.SimpleDateFormat monthFormat = new java.text.SimpleDateFormat("yyyy-MM");
        Map<String, Double> monthlyYield = new HashMap<>();
        for (PlantingRecord record : records) {
            if (record.getHarvestDate() == null) {
                continue; // not yet harvested — no month bucket (original would NPE here)
            }
            String month = monthFormat.format(record.getHarvestDate());
            monthlyYield.merge(month, record.getYieldAmount(), Double::sum);
        }
        result.put("monthlyYieldTrend", monthlyYield);
        List<Map<String, Object>> riskAssessment = new ArrayList<>();
        for (PlantingRecord record : records) {
            Map<String, Object> risk = new HashMap<>();
            risk.put("recordId", record.getId());
            risk.put("cropType", record.getCropType());
            // Yield per unit area; guard against a zero/unset planting area.
            double yieldRate = record.getPlantingArea() > 0 ? record.getYieldAmount() / record.getPlantingArea() : 0;
            risk.put("yieldRate", yieldRate);
            risk.put("riskLevel", yieldRate < 1000 ? "高" : yieldRate < 2000 ? "中" : "低");
            riskAssessment.add(risk);
        }
        result.put("riskAssessment", riskAssessment);
        return result;
    }

    /**
     * Ranks suppliers for a material purchase by combining current price data with the
     * farmer's procurement history (quality/delivery scores).
     *
     * @param farmerId         farmer whose history weights the recommendation
     * @param materialType     material category to source
     * @param requiredQuantity units needed; suppliers with less stock are skipped
     * @return map with price/supplier aggregates, the top-5 scored recommendations and
     *         a historical-vs-current price comparison
     */
    public Map<String, Object> optimizeAgriculturalMaterialProcurement(Long farmerId, String materialType, Integer requiredQuantity) {
        List<AgriculturalMaterial> materials = materialMapper.selectByTypeOrderByPrice(materialType);
        Dataset<Row> materialDataset = spark.createDataFrame(materials, AgriculturalMaterial.class);
        materialDataset.createOrReplaceTempView("materials");
        Dataset<Row> priceAnalysis = spark.sql("SELECT supplier_name, AVG(unit_price) as avg_price, MIN(unit_price) as min_price, MAX(unit_price) as max_price FROM materials GROUP BY supplier_name");
        List<ProcurementHistory> history = materialMapper.selectProcurementHistoryByFarmer(farmerId);
        Dataset<Row> historyDataset = spark.createDataFrame(history, ProcurementHistory.class);
        historyDataset.createOrReplaceTempView("procurement_history");
        Dataset<Row> supplierRating = spark.sql("SELECT supplier_name, COUNT(*) as order_count, AVG(quality_score) as avg_quality, AVG(delivery_score) as avg_delivery FROM procurement_history GROUP BY supplier_name ORDER BY avg_quality DESC, avg_delivery DESC");
        Map<String, Object> result = new HashMap<>();
        result.put("priceAnalysis", priceAnalysis.collectAsList());
        result.put("supplierRating", supplierRating.collectAsList());
        List<Map<String, Object>> recommendations = new ArrayList<>();
        for (AgriculturalMaterial material : materials) {
            if (material.getStockQuantity() >= requiredQuantity) {
                Map<String, Object> recommendation = new HashMap<>();
                recommendation.put("materialId", material.getId());
                recommendation.put("materialName", material.getMaterialName());
                recommendation.put("supplierName", material.getSupplierName());
                recommendation.put("unitPrice", material.getUnitPrice());
                recommendation.put("totalCost", material.getUnitPrice() * requiredQuantity);
                recommendation.put("stockQuantity", material.getStockQuantity());
                // Average historical quality for this exact material; 0 when never bought before.
                double qualityScore = history.stream()
                        .filter(h -> h.getMaterialId().equals(material.getId()))
                        .mapToDouble(ProcurementHistory::getQualityScore)
                        .average().orElse(0);
                recommendation.put("historicalQualityScore", qualityScore);
                // Heuristic: 60% price (cheaper => higher), 40% historical quality.
                recommendation.put("recommendationScore", (100 - material.getUnitPrice() / 10) * 0.6 + qualityScore * 0.4);
                recommendations.add(recommendation);
            }
        }
        recommendations.sort((a, b) -> Double.compare((Double) b.get("recommendationScore"), (Double) a.get("recommendationScore")));
        // Copy the top-5 slice: subList is only a view of the backing list and should
        // not escape this method.
        result.put("procurementRecommendations",
                recommendations.size() > 5 ? new ArrayList<>(recommendations.subList(0, 5)) : recommendations);
        double avgHistoricalPrice = history.stream().filter(h -> h.getMaterialType().equals(materialType)).mapToDouble(ProcurementHistory::getUnitPrice).average().orElse(0);
        double currentMinPrice = materials.stream().mapToDouble(AgriculturalMaterial::getUnitPrice).min().orElse(0);
        result.put("priceComparison", Map.of("historicalAvgPrice", avgHistoricalPrice, "currentMinPrice", currentMinPrice, "priceDifference", avgHistoricalPrice - currentMinPrice));
        return result;
    }

    /**
     * Analyzes market prices for a product over a date range and produces a naive
     * 30-day forecast plus a selling recommendation.
     *
     * <p>Fixes over the previous revision: the trend slope and "latest price" are now
     * taken from the chronological series (the old code sorted prices by value first,
     * so "last" was actually the maximum price), and an empty result set no longer
     * throws {@link IndexOutOfBoundsException}.
     *
     * @param productType product to analyze
     * @param startDate   inclusive range start
     * @param endDate     inclusive range end
     * @return map with daily/regional aggregates, price statistics, a 30-point
     *         forecast, seasonal averages and a textual recommendation
     */
    public Map<String, Object> analyzeMarketTrendsAndForecast(String productType, Date startDate, Date endDate) {
        List<MarketInfo> marketData = marketInfoMapper.selectByProductTypeAndDateRange(productType, startDate, endDate);
        Map<String, Object> result = new HashMap<>();
        if (marketData.isEmpty()) {
            // Guard: every downstream statistic indexes into the price list.
            result.put("priceTrend", Collections.emptyList());
            result.put("regionAnalysis", Collections.emptyList());
            result.put("priceStatistics", Map.of("averagePrice", 0.0, "priceVolatility", 0.0, "medianPrice", 0.0));
            result.put("priceForecast", Collections.emptyList());
            result.put("seasonalPattern", Collections.emptyMap());
            result.put("marketRecommendation", "暂无行情数据");
            return result;
        }
        Dataset<Row> marketDataset = spark.createDataFrame(marketData, MarketInfo.class);
        marketDataset.createOrReplaceTempView("market_info");
        Dataset<Row> priceTrend = spark.sql("SELECT DATE_FORMAT(record_date, 'yyyy-MM-dd') as date, AVG(market_price) as avg_price, MAX(market_price) as max_price, MIN(market_price) as min_price FROM market_info GROUP BY DATE_FORMAT(record_date, 'yyyy-MM-dd') ORDER BY date");
        Dataset<Row> regionAnalysis = spark.sql("SELECT market_region, AVG(market_price) as avg_price, SUM(trading_volume) as total_volume FROM market_info GROUP BY market_region ORDER BY total_volume DESC");
        result.put("priceTrend", priceTrend.collectAsList());
        result.put("regionAnalysis", regionAnalysis.collectAsList());
        // Chronological series (mapper order — assumed date-ordered, TODO confirm in
        // the mapper SQL) for trend math; a value-sorted copy only for the median.
        List<Double> chronological = marketData.stream().map(MarketInfo::getMarketPrice).collect(java.util.stream.Collectors.toList());
        List<Double> sortedPrices = new ArrayList<>(chronological);
        Collections.sort(sortedPrices);
        double avgPrice = chronological.stream().mapToDouble(Double::doubleValue).average().orElse(0);
        // Population standard deviation of prices.
        double priceVolatility = Math.sqrt(chronological.stream().mapToDouble(p -> Math.pow(p - avgPrice, 2)).average().orElse(0));
        result.put("priceStatistics", Map.of("averagePrice", avgPrice, "priceVolatility", priceVolatility, "medianPrice", sortedPrices.get(sortedPrices.size() / 2)));
        // Naive forecast: moving average of the last window, extrapolated with the
        // window's per-day slope; confidence decays with horizon, floored at 50.
        int windowSize = Math.min(7, chronological.size());
        double windowAvg = chronological.subList(chronological.size() - windowSize, chronological.size())
                .stream().mapToDouble(Double::doubleValue).average().orElse(avgPrice);
        double latestPrice = chronological.get(chronological.size() - 1);
        double slope = (latestPrice - chronological.get(chronological.size() - windowSize)) / windowSize;
        List<Map<String, Object>> forecast = new ArrayList<>();
        for (int i = 0; i < 30; i++) {
            Map<String, Object> forecastPoint = new HashMap<>();
            forecastPoint.put("daysAhead", i + 1);
            forecastPoint.put("forecastPrice", windowAvg + i * slope);
            forecastPoint.put("confidenceLevel", Math.max(50, 95 - i * 1.5));
            forecast.add(forecastPoint);
        }
        result.put("priceForecast", forecast);
        // Seasonal averages in one pass (the old replaceAll re-scanned the list per key).
        Map<String, Double> seasonalSum = new HashMap<>();
        Map<String, Integer> seasonalCount = new HashMap<>();
        for (MarketInfo info : marketData) {
            String season = getSeasonFromDate(info.getRecordDate());
            seasonalSum.merge(season, info.getMarketPrice(), Double::sum);
            seasonalCount.merge(season, 1, Integer::sum);
        }
        Map<String, Double> seasonalPattern = new HashMap<>();
        seasonalSum.forEach((season, sum) -> seasonalPattern.put(season, sum / seasonalCount.get(season)));
        result.put("seasonalPattern", seasonalPattern);
        String recommendation = avgPrice > latestPrice ? "当前价格低于平均水平,建议适当储备待价而沽" : priceVolatility > avgPrice * 0.15 ? "价格波动较大,建议分批销售降低风险" : "价格相对稳定,可按常规节奏销售";
        result.put("marketRecommendation", recommendation);
        return result;
    }

    /**
     * Maps a date to a Chinese season label by calendar month
     * (3-5 spring, 6-8 summer, 9-11 autumn, otherwise winter).
     */
    private String getSeasonFromDate(Date date) {
        java.util.Calendar cal = java.util.Calendar.getInstance();
        cal.setTime(date);
        int month = cal.get(java.util.Calendar.MONTH) + 1;
        if (month >= 3 && month <= 5) return "春季";
        if (month >= 6 && month <= 8) return "夏季";
        if (month >= 9 && month <= 11) return "秋季";
        return "冬季";
    }
}
六.系统文档展示
结束
💕💕文末获取源码联系 计算机程序员小杨