校园美食推荐系统 | 【毕设大数据选题推荐】大数据可视化大屏 毕设大数据项目 附源码 万字论文+文档指导+ppt+课程设计 Hadoop Spark

99 阅读7分钟

💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐

校园美食推荐系统介绍

校园美食推荐系统是一个基于大数据技术的智能化平台,采用Hadoop+Spark分布式计算框架作为核心技术架构,通过Python开发语言结合Django后端框架构建稳定可靠的服务端,前端采用Vue+ElementUI+Echarts技术栈实现现代化的用户界面。系统利用Spark SQL和Pandas、NumPy等数据处理工具对校园美食相关数据进行深度挖掘和分析,通过HDFS分布式文件系统存储海量美食数据,结合MySQL数据库管理结构化信息。平台主要包含系统首页展示、学生用户管理、美食信息维护、预测数据分析、系统后台管理以及个人中心等核心功能模块。系统运用大数据分析技术对学生的美食偏好、消费习惯、评价反馈等多维度数据进行智能处理,通过Spark计算引擎实现个性化推荐算法,为校园师生提供精准的美食推荐服务,同时利用数据可视化技术展现美食消费趋势和预测分析结果,提升校园餐饮服务的智能化水平和用户体验满意度。

校园美食推荐系统演示视频

演示视频

校园美食推荐系统演示图片

在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述 在这里插入图片描述

校园美食推荐系统代码展示

from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark.ml.feature import StringIndexer
from pyspark.sql.functions import col, desc, avg, count, when
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import pandas as pd
import numpy as np
import json
from datetime import datetime, timedelta
# Module-wide SparkSession shared by every view below.
# local[*] runs Spark in-process on all cores; adaptive query execution
# lets Spark tune shuffle partition counts at runtime.
spark = (
    SparkSession.builder
    .appName("CampusFoodRecommendation")
    .master("local[*]")
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
    .getOrCreate()
)
@csrf_exempt
def food_recommendation_algorithm(request):
    """Django view: personalised food recommendations for one user.

    Reads the POSTed ``user_id``, trains an ALS collaborative-filtering
    model on the ``user_ratings`` table, and returns the top-10 foods
    plus the user's per-category preference profile as JSON.  A user
    with no rating history falls back to a popularity-based list
    (foods with more than 5 ratings, ordered by average rating).

    NOTE(review): the ALS model is retrained on every request, which is
    expensive — consider training offline and caching the fitted model.
    """
    user_id = request.POST.get('user_id')
    # Drop incomplete rating rows up front; NULL ids would break the indexers.
    user_behavior_df = spark.sql(
        "SELECT user_id, food_id, rating, timestamp FROM user_ratings "
        "WHERE user_id IS NOT NULL AND food_id IS NOT NULL")
    food_info_df = spark.sql(
        "SELECT food_id, food_name, category, price, restaurant FROM food_info")
    # ALS requires numeric ids: map raw user/food ids to dense indices.
    user_indexer = StringIndexer(inputCol="user_id", outputCol="user_index").fit(user_behavior_df)
    food_indexer = StringIndexer(inputCol="food_id", outputCol="food_index").fit(user_behavior_df)
    indexed_data = food_indexer.transform(user_indexer.transform(user_behavior_df))
    als = ALS(maxIter=10, regParam=0.01, userCol="user_index", itemCol="food_index",
              ratingCol="rating", coldStartStrategy="drop")
    model = als.fit(indexed_data)
    target_user_df = indexed_data.filter(col("user_id") == user_id).select("user_index").distinct()
    if target_user_df.count() == 0:
        # Cold start: this user has no history, so recommend globally
        # popular foods instead of personalised ones.
        popular_foods = (food_info_df
                         .join(user_behavior_df.groupBy("food_id")
                               .agg(avg("rating").alias("avg_rating"),
                                    count("rating").alias("rating_count")),
                               "food_id")
                         .filter(col("rating_count") > 5)
                         .orderBy(desc("avg_rating"))
                         .limit(10))
        result_data = [{"food_id": row.food_id, "food_name": row.food_name,
                        "category": row.category, "price": row.price,
                        "restaurant": row.restaurant, "predicted_rating": row.avg_rating}
                       for row in popular_foods.collect()]
        return JsonResponse({"status": "success", "recommendations": result_data,
                             "algorithm": "popularity_based"})
    user_recommendations = model.recommendForUserSubset(target_user_df, 10)
    recommendations_list = user_recommendations.collect()[0]["recommendations"]
    # Reverse map: dense food index -> original food_id.
    food_index_to_id = {row.food_index: row.food_id
                        for row in indexed_data.select("food_index", "food_id").distinct().collect()}
    recommended_food_ids = [food_index_to_id[rec["food_index"]] for rec in recommendations_list]
    # BUGFIX: the original filtered and collected the detail DataFrame once
    # per recommendation, launching a separate Spark job for each of the
    # ten foods.  Collect the matching rows once and look them up locally.
    food_details = {row.food_id: row
                    for row in food_info_df.filter(col("food_id").isin(recommended_food_ids)).collect()}
    result_data = []
    for rec in recommendations_list:
        food_id = food_index_to_id[rec["food_index"]]
        detail = food_details[food_id]
        result_data.append({"food_id": food_id, "food_name": detail.food_name,
                            "category": detail.category, "price": detail.price,
                            "restaurant": detail.restaurant,
                            "predicted_rating": round(rec["rating"], 2)})
    # Per-category preference profile for the same user, best-liked first.
    user_preference_analysis = (user_behavior_df.filter(col("user_id") == user_id)
                                .join(food_info_df, "food_id")
                                .groupBy("category")
                                .agg(avg("rating").alias("avg_category_rating"),
                                     count("rating").alias("category_count"))
                                .orderBy(desc("avg_category_rating")))
    preference_data = [{"category": row.category,
                        "avg_rating": round(row.avg_category_rating, 2),
                        "count": row.category_count}
                       for row in user_preference_analysis.collect()]
    return JsonResponse({"status": "success", "recommendations": result_data,
                         "user_preferences": preference_data,
                         "algorithm": "collaborative_filtering"})
@csrf_exempt
def food_information_management(request):
    """Django view: CRUD + search endpoint for the ``food_info`` table.

    POST with ``action`` in {'add_food', 'update_food', 'delete_food'}
    mutates the table and returns a related aggregate (nutrition, price,
    or category statistics) as JSON.  GET searches foods by optional
    keyword / category / price-range filters and returns matches joined
    with their average user ratings.

    NOTE(review): every POST branch reads ``food_info`` and then
    overwrites the same table from a plan that still references it.
    Spark commonly rejects this ("Cannot overwrite a table that is also
    being read from") — verify against the deployed Spark version.
    """
    if request.method == 'POST':
        action = request.POST.get('action')
        if action == 'add_food':
            # food_data must be a JSON object matching the food_info
            # schema; a malformed body raises json.JSONDecodeError here.
            food_data = json.loads(request.POST.get('food_data'))
            new_food_df = spark.createDataFrame([food_data])
            existing_foods_df = spark.sql("SELECT * FROM food_info")
            # Append semantics implemented as union + full-table overwrite.
            updated_foods_df = existing_foods_df.union(new_food_df)
            updated_foods_df.write.mode("overwrite").saveAsTable("food_info")
            # Per-category nutrition averages, recomputed after the insert.
            nutrition_analysis = spark.sql("SELECT category, AVG(calories) as avg_calories, AVG(protein) as avg_protein, AVG(fat) as avg_fat FROM food_info GROUP BY category")
            category_stats = [{"category": row.category, "avg_calories": round(row.avg_calories, 2), "avg_protein": round(row.avg_protein, 2), "avg_fat": round(row.avg_fat, 2)} for row in nutrition_analysis.collect()]
            return JsonResponse({"status": "success", "message": "美食信息添加成功", "category_stats": category_stats})
        elif action == 'update_food':
            food_id = request.POST.get('food_id')
            update_data = json.loads(request.POST.get('update_data'))
            existing_foods_df = spark.sql("SELECT * FROM food_info")
            # Conditional column rewrite: only the row whose food_id matches
            # receives the new values; every other row keeps its own.
            # NOTE(review): a key missing from update_data yields None, so
            # the matched row's column becomes NULL — confirm intended.
            updated_df = existing_foods_df.withColumn("food_name", when(col("food_id") == food_id, update_data.get('food_name')).otherwise(col("food_name"))).withColumn("price", when(col("food_id") == food_id, update_data.get('price')).otherwise(col("price"))).withColumn("category", when(col("food_id") == food_id, update_data.get('category')).otherwise(col("category"))).withColumn("description", when(col("food_id") == food_id, update_data.get('description')).otherwise(col("description")))
            updated_df.write.mode("overwrite").saveAsTable("food_info")
            # Price spread per category, most expensive categories first.
            price_analysis = spark.sql("SELECT category, MIN(price) as min_price, MAX(price) as max_price, AVG(price) as avg_price FROM food_info GROUP BY category ORDER BY avg_price DESC")
            price_stats = [{"category": row.category, "min_price": row.min_price, "max_price": row.max_price, "avg_price": round(row.avg_price, 2)} for row in price_analysis.collect()]
            return JsonResponse({"status": "success", "message": "美食信息更新成功", "price_analysis": price_stats})
        elif action == 'delete_food':
            food_id = request.POST.get('food_id')
            existing_foods_df = spark.sql("SELECT * FROM food_info")
            # Delete implemented as filter-out + full-table overwrite.
            filtered_df = existing_foods_df.filter(col("food_id") != food_id)
            filtered_df.write.mode("overwrite").saveAsTable("food_info")
            # Remaining food counts per category after the delete.
            category_distribution = spark.sql("SELECT category, COUNT(*) as food_count FROM food_info GROUP BY category ORDER BY food_count DESC")
            distribution_data = [{"category": row.category, "count": row.food_count} for row in category_distribution.collect()]
            return JsonResponse({"status": "success", "message": "美食信息删除成功", "category_distribution": distribution_data})
    elif request.method == 'GET':
        # Optional filters; the empty string means "no filter".
        search_keyword = request.GET.get('keyword', '')
        category_filter = request.GET.get('category', '')
        price_range = request.GET.get('price_range', '')
        foods_df = spark.sql("SELECT * FROM food_info")
        if search_keyword:
            # Substring match against name OR description.
            foods_df = foods_df.filter(col("food_name").contains(search_keyword) | col("description").contains(search_keyword))
        if category_filter:
            foods_df = foods_df.filter(col("category") == category_filter)
        if price_range:
            # Expected form "min-max"; anything else raises ValueError here.
            price_min, price_max = map(float, price_range.split('-'))
            foods_df = foods_df.filter((col("price") >= price_min) & (col("price") <= price_max))
        # Left join keeps foods that have no ratings (NULL avg_rating/count).
        foods_with_ratings = foods_df.join(spark.sql("SELECT food_id, AVG(rating) as avg_rating, COUNT(rating) as rating_count FROM user_ratings GROUP BY food_id"), "food_id", "left")
        result_foods = foods_with_ratings.orderBy(desc("avg_rating")).collect()
        # `or 0` maps the NULLs from the left join to zero in the response.
        foods_data = [{"food_id": row.food_id, "food_name": row.food_name, "category": row.category, "price": row.price, "restaurant": row.restaurant, "description": row.description, "avg_rating": round(row.avg_rating or 0, 2), "rating_count": row.rating_count or 0} for row in result_foods]
        return JsonResponse({"status": "success", "foods": foods_data, "total_count": len(foods_data)})
@csrf_exempt
def prediction_data_analysis(request):
    """Django view: historical analytics plus naive forward predictions.

    POST parameters:
        analysis_type: one of 'consumption_trend' (default),
                       'category_preference', 'user_behavior'.
        time_range:    look-back window in days as a string (default '30');
                       a non-numeric value raises ValueError (unchanged).

    Returns JSON with aggregates for the window and, for the first two
    types, simple extrapolated "predictions".

    BUGFIX: the original fell off the end for an unknown
    ``analysis_type`` and returned None (Django then raises); an
    explicit JSON error response is returned instead.
    """
    analysis_type = request.POST.get('analysis_type', 'consumption_trend')
    time_range = request.POST.get('time_range', '30')
    end_date = datetime.now()
    start_date = end_date - timedelta(days=int(time_range))
    # The f-string SQL below interpolates only server-formatted dates
    # (strftime of datetime objects), never raw request text, so it is
    # not injectable.
    if analysis_type == 'consumption_trend':
        # Daily order counts and revenue over the window.
        daily_consumption = spark.sql(f"SELECT DATE(order_time) as order_date, COUNT(*) as order_count, SUM(total_amount) as total_revenue FROM order_records WHERE order_time >= '{start_date.strftime('%Y-%m-%d')}' AND order_time <= '{end_date.strftime('%Y-%m-%d')}' GROUP BY DATE(order_time) ORDER BY order_date")
        trend_data = [{"date": row.order_date.strftime('%Y-%m-%d'), "order_count": row.order_count, "revenue": float(row.total_revenue)} for row in daily_consumption.collect()]
        consumption_df = spark.createDataFrame([(i, data['order_count']) for i, data in enumerate(trend_data)], ["day_index", "order_count"])
        avg_orders = consumption_df.agg(avg("order_count")).collect()[0][0]
        prediction_days = 7
        predicted_data = []
        for i in range(1, prediction_days + 1):
            # Placeholder forecast: mean orders with a +/-10% sinusoidal
            # wobble; revenue assumes a fixed 25.8 average order value.
            predicted_orders = avg_orders * (1 + np.sin(i * 0.5) * 0.1)
            predicted_revenue = predicted_orders * 25.8
            future_date = end_date + timedelta(days=i)
            predicted_data.append({"date": future_date.strftime('%Y-%m-%d'), "predicted_orders": round(predicted_orders), "predicted_revenue": round(predicted_revenue, 2)})
        return JsonResponse({"status": "success", "historical_trend": trend_data, "predictions": predicted_data, "analysis_type": "consumption_trend"})
    elif analysis_type == 'category_preference':
        # Orders, ratings and revenue per food category in the window.
        category_analysis = spark.sql(f"SELECT f.category, COUNT(o.order_id) as order_count, AVG(r.rating) as avg_rating, SUM(o.total_amount) as category_revenue FROM order_records o JOIN food_info f ON o.food_id = f.food_id LEFT JOIN user_ratings r ON o.food_id = r.food_id WHERE o.order_time >= '{start_date.strftime('%Y-%m-%d')}' GROUP BY f.category ORDER BY order_count DESC")
        category_data = [{"category": row.category, "order_count": row.order_count, "avg_rating": round(row.avg_rating or 0, 2), "revenue": float(row.category_revenue), "market_share": 0} for row in category_analysis.collect()]
        total_orders = sum([data['order_count'] for data in category_data])
        for data in category_data:
            data['market_share'] = round((data['order_count'] / total_orders) * 100, 2) if total_orders > 0 else 0
        category_growth_prediction = []
        for data in category_data:
            # NOTE(review): growth is sampled uniformly at random, so
            # these "predictions" are nondeterministic placeholders.
            growth_rate = np.random.uniform(-0.1, 0.2)
            predicted_orders = data['order_count'] * (1 + growth_rate)
            predicted_revenue = data['revenue'] * (1 + growth_rate)
            category_growth_prediction.append({"category": data['category'], "current_orders": data['order_count'], "predicted_orders": round(predicted_orders), "growth_rate": round(growth_rate * 100, 2), "predicted_revenue": round(predicted_revenue, 2)})
        return JsonResponse({"status": "success", "category_analysis": category_data, "growth_predictions": category_growth_prediction, "analysis_type": "category_preference"})
    elif analysis_type == 'user_behavior':
        # Per-user order frequency and average spend in the window.
        user_behavior_analysis = spark.sql(f"SELECT u.user_id, COUNT(o.order_id) as order_frequency, AVG(o.total_amount) as avg_order_value, MAX(o.order_time) as last_order_time FROM user_info u LEFT JOIN order_records o ON u.user_id = o.user_id WHERE o.order_time >= '{start_date.strftime('%Y-%m-%d')}' GROUP BY u.user_id")
        # Bucket users by frequency: >=20 high, >=10 mid, >=5 low, else new.
        behavior_segments = user_behavior_analysis.withColumn("user_segment", when(col("order_frequency") >= 20, "高频用户").when(col("order_frequency") >= 10, "中频用户").when(col("order_frequency") >= 5, "低频用户").otherwise("新用户"))
        segment_stats = behavior_segments.groupBy("user_segment").agg(count("user_id").alias("user_count"), avg("avg_order_value").alias("segment_avg_value")).collect()
        segment_data = [{"segment": row.user_segment, "user_count": row.user_count, "avg_order_value": round(row.segment_avg_value or 0, 2)} for row in segment_stats]
        # Hourly order distribution to find peak ordering hours.
        peak_hours_analysis = spark.sql(f"SELECT HOUR(order_time) as hour, COUNT(*) as order_count FROM order_records WHERE order_time >= '{start_date.strftime('%Y-%m-%d')}' GROUP BY HOUR(order_time) ORDER BY hour")
        peak_hours_data = [{"hour": row.hour, "order_count": row.order_count} for row in peak_hours_analysis.collect()]
        return JsonResponse({"status": "success", "user_segments": segment_data, "peak_hours": peak_hours_data, "analysis_type": "user_behavior"})
    # Unknown analysis_type: explicit error instead of returning None.
    return JsonResponse({"status": "error", "message": "unsupported analysis_type"}, status=400)

校园美食推荐系统文档展示

在这里插入图片描述

💖💖作者:计算机毕业设计杰瑞 💙💙个人简介:曾长期从事计算机专业培训教学,本人也热爱上课教学,语言擅长Java、微信小程序、Python、Golang、安卓Android等,开发项目包括大数据、深度学习、网站、小程序、安卓、算法。平常会做一些项目定制化开发、代码讲解、答辩教学、文档编写、也懂一些降重方面的技巧。平常喜欢分享一些自己开发中遇到的问题的解决办法,也喜欢交流技术,大家有技术代码这一块的问题可以问我! 💛💛想说的话:感谢大家的关注与支持! 💜💜 网站实战项目 安卓/小程序实战项目 大数据实战项目 深度学习实战项目 计算机毕业设计选题推荐