Flask Basics

App initialization

# First, import the Flask framework
from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/')
def index():
    # Render the index page
    return render_template('index.html')

if __name__ == '__main__':
    # Enable debug mode
    app.debug = True
    app.run()

Flask uses templates as the default folder for template files and static as the default folder for static files, with /static as the default URL prefix for static assets. All of these defaults can be overridden:

app = Flask(import_name=__name__,
            static_url_path='/python',   # URL prefix for serving static files
            static_folder='static',      # folder holding static files
            template_folder='templates') # folder holding template files
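
With this configuration, a file saved as static/style.css is served at /python/style.css rather than at the default /static/style.css.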

Handling requests

import json

@app.route('/login', methods=['POST', 'GET'])
def login():
    # How the parameters are read depends on the request method
    if request.method == 'POST':
        tele = request.form.get('tele')
        pwd = request.form.get('pwd')
    else:
        tele = request.args.get('tele')
        pwd = request.args.get('pwd')
    # Return serialized JSON; the front end deserializes the response
    return json.dumps({
        "status": 1
    })
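
To exercise the endpoint above, a client can POST the form fields and deserialize the JSON response. A minimal sketch, assuming the app is running on localhost:5000; the tele and pwd values are hypothetical placeholders:

import requests

resp = requests.post('http://127.0.0.1:5000/login',
                     data={'tele': '13800000000', 'pwd': 'secret'})
result = resp.json()  # deserialize the JSON body
print(result['status'])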

Connecting to MySQL

import pymysql
# host, user, password, database are placeholders for your connection settings
db = pymysql.connect(host=host, user=user, password=password, database=database)
# Get a cursor
cursor = db.cursor()
sql = "select * from test"
# Execute the SQL
cursor.execute(sql)
# Fetch all query results (a tuple of rows)
res = cursor.fetchall()

sql2 = "insert into test(user, name) values('123','hello')"
cursor.execute(sql2)
# Changes to table data only take effect after commit; to discard them, call db.rollback()
db.commit()

# Close the connection
cursor.close()
db.close()
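
Since the comment above mentions rollback, here is one common way to pair commit with rollback, sketched under the assumption that db and cursor are the open pymysql connection and cursor from above:

try:
    cursor.execute("insert into test(user, name) values('456','world')")
    db.commit()    # make the change permanent
except Exception:
    db.rollback()  # discard the change on any error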

MD5 hashing

import hashlib
def makePwd(pwd):
    # Create an MD5 hash object
    md5 = hashlib.md5()
    # Feed the password bytes into the hash
    md5.update(pwd.encode('utf-8'))
    # Get the hex digest
    res = md5.hexdigest()
    return res
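
At login time, the stored digest can be compared with the digest of the submitted password. A minimal sketch; both values here are hypothetical stand-ins for data loaded from the database and the login form:

stored_pwd = makePwd('secret')  # hypothetical digest loaded from the database
submitted = 'secret'            # hypothetical password from the login form
if makePwd(submitted) == stored_pwd:
    print('login ok')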

Downloading files

import os
from flask import send_from_directory
app = Flask(__name__, static_url_path='')

@app.route('/download/<path:filename>')
def downloader(filename):
    dirpath = os.path.join(app.root_path, 'static/download')
    # as_attachment=True makes the browser download the file instead of displaying it
    return send_from_directory(dirpath, filename, as_attachment=True)
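
To test the route, a client can save the response body to disk. A sketch assuming the app runs on localhost:5000 and that static/download contains a hypothetical report.pdf:

import requests

resp = requests.get('http://127.0.0.1:5000/download/report.pdf')
with open('report.pdf', 'wb') as f:
    f.write(resp.content)

Note that send_from_directory also rejects paths that escape the given directory, so requests like /download/../app.py are not served.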

Reading and writing files

# Read a file
with open('result.txt', 'r', encoding='utf-8') as f:
    lines = f.readlines()
    for line in lines:
        line = line.strip().split(',')
        ...  # process the parsed fields

# Write a file
fp = open('warehouse.txt', 'a', encoding='utf-8')
fp.write('%s,%s,%s,%s\n' % (TIMESTAMP, code, name, price))
fp.close()
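
The write example can use the same with pattern as the read example, which closes the file even if the write raises. A sketch with hypothetical values standing in for TIMESTAMP, code, name, and price:

with open('warehouse.txt', 'a', encoding='utf-8') as fp:
    # the four values below are placeholders for illustration
    fp.write('%s,%s,%s,%s\n' % ('2024-01-01', '600000', 'demo', '10.5'))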

Web scraping

import requests
from bs4 import BeautifulSoup

URL = 'https://www.xiachufang.com/category/'
response = requests.get(URL)
# Parse the HTML with bs4
bs = BeautifulSoup(response.text, features='html.parser')
# Get the list of matching divs
listOne = bs.find_all('div', {'class': 'cates-list'})

for lo in listOne:
    # Get the text of the h3
    name0 = lo.find('h3', {'class': 'font20'}).get_text()
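
find returns None when no h3 matches, which would make the get_text call above raise an AttributeError. The loop below sketches the same traversal with that guard added; the class names are taken from the original snippet:

for lo in listOne:
    h3 = lo.find('h3', {'class': 'font20'})
    if h3 is None:
        continue  # skip divs without a category heading
    print(h3.get_text().strip())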