导语
绿植之于家居的地位可以说是越来越重要,也是最容易出效果的软装物料了吧!
不过,不同于其他的软装选手,绿植拥有更柔软的生命,需要你花心思呵护。那么问题又来了,总是把植物养死怎么破?
**
**
之前一直有养一些小植物的小兴趣,但是每次都养不活,除了绿萝,哈哈哈!**养过的玫瑰花、栀子花带着花苞买回来几天就凉了!**23333~
这不,我不死心啊,最近入手了一小盆的多肉绿植。【听说多肉好养我才买的!】这盆是蓝鸟多肉植物哦!颜色特别好看~颜值高的一眼相中了。
就是不知道在我手里这盆多肉能撑几天?不说别的,为了养活这盆植物,至少要上心嘛~所以我小心翼翼地准备开始养了。
但是不知道多肉植物的习惯,光照等等——
SO 做了一款Python界面化小程序—多肉植物的数据查询小系统,养护的日照、施肥、繁殖等等都在里面!
这次肯定养的活了哈!家里有种植多肉植物的这次可有福利啦,跟着小编一起吧~

正文
本文是基于Pyqt5的界面化小程序哦,首先是把环境都安装好啦!
(1)环境安装:Python3、Pycharm安装包;PyQt5、pillow、requests模块以及一些自带的模块。
模块安装统一使用豆瓣的镜像源哈:pip install -i https://pypi.douban.com/simple/ +模块名。
(2)选择的多肉数据网址是这个:
http://www.mengsang.com/duorou/需要先获取这些数据。代码如下:
'''
Function:
多肉数据爬虫
'''
import os
import time
import random
import pickle
import requests
from lxml import etree
'''Succulent-plant data crawler (scrapes mengsang.com listing + detail pages).'''
class SucculentCrawler():
    def __init__(self, **kwargs):
        # Rotate Referer and User-Agent values per request to look less like a bot.
        self.referer_list = ["http://www.google.com/", "http://www.bing.com/", "http://www.baidu.com/", "https://www.360.cn/"]
        self.ua_list = ['Mozilla/5.0 (Linux; Android 5.1.1; Z828 Build/LMY47V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.111 Mobile Safari/537.36',
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.172 Safari/537.22',
                        'Mozilla/5.0 (iPad; CPU OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/12F69 Safari/600.1.4',
                        'Mozilla/5.0 (iPad; CPU OS 11_2_5 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/64.0.3282.112 Mobile/15D60 Safari/604.1',
                        'Mozilla/5.0 (Linux; Android 7.1.1; SM-T350 Build/NMF26X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.111 Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.98 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36',
                        'Mozilla/5.0 (Linux; Android 6.0.1; SM-G610F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
                        'Mozilla/5.0 (Linux; Android 5.1.1; 5065N Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36']
        # All listing-page URLs, discovered once up front (does a network request).
        self.page_urls = self.__getAllPageUrls()
        # Index of the last page crawled; next() pre-increments it.
        self.page_pointer = -1
        # Root directory for the per-succulent data folders.
        self.savedir = 'resources/succulents'
    def next(self):
        """Crawl the next listing page and persist every succulent on it.

        Returns:
            bool: True when all pages have been crawled, False otherwise.
        """
        # Advance to the next listing page; stop when exhausted.
        self.page_pointer += 1
        if self.page_pointer >= len(self.page_urls):
            return True
        page_url = self.page_urls[self.page_pointer]
        # Collect (name, thumbnail url, detail-page url) triples from the listing page.
        res = requests.get(page_url, headers=self.__randomHeaders())
        res.encoding = 'gbk'  # the site serves GBK-encoded pages
        spans = etree.HTML(res.text).xpath('//span[@class="tImgUlImg"]')
        succulent_list = []
        for span in spans:
            # '/' and '\' would break the per-succulent directory name, so replace them.
            name = span.xpath('a/@title')[0].replace('/', '-').replace('\\', '-')
            succulent_list.append([name, span.xpath('a/img/@src')[0], span.xpath('a/@href')[0]])
        # Visit each detail page and extract the care-info table.
        for item in succulent_list:
            data = [item[0], item[1]]
            headers = self.__randomHeaders()
            headers.update({'Referer': page_url})
            res = requests.get(item[-1], headers=headers)
            res.encoding = 'gbk'
            html_root = etree.HTML(res.text).xpath('//div[@class="cbRight"]/div[@class="mainBox"]')[0]
            # First row of the table is a header; skip it.
            rows = html_root.xpath('div[2]/table[@class="tTable"]/tr')[1:]
            intro = ['繁殖: ', '易活度: ', '季节: ', '温度: ', '日照: ', '浇水量: ',
                     '日照说明: ', '浇水说明: ', '大类/属: ', '中文种名: ', '英文学名: ']
            for idx, tr in enumerate(rows):
                if idx == 0:
                    breed = tr.xpath('./td[2]/text()')
                    intro[0] += breed[0] if breed else '未知'
                    # Rating is encoded in the image filename (e.g. 'x3.gif' -> 3 stars);
                    # [1:] drops the leading letter — TODO confirm against the site.
                    intro[1] += int(tr.xpath('./td[4]/img/@src')[0].split('/')[-1].split('.')[0][1:]) * '⭐'
                elif idx == 1:
                    season = tr.xpath('./td[2]/text()')
                    intro[2] += season[0] if season else '未知'
                    temperature = tr.xpath('./td[4]/text()')
                    if temperature:
                        intro[3] += temperature[0].strip().replace(' ', '')
                elif idx == 2:
                    intro[4] += int(tr.xpath('./td[2]/img/@src')[0].split('/')[-1].split('.')[0]) * '☀'
                    intro[5] += int(tr.xpath('./td[4]/img/@src')[0].split('/')[-1].split('.')[0][1:]) * '💧'
            # Free-text sections (sunlight/watering notes, taxonomy, names).
            # NOTE: a new loop variable ('section') is used here; the original
            # reused 'item' and clobbered the outer loop variable.
            sections = html_root.xpath('div[2]/div')[0].xpath('//div[@class="pt5"]')
            for idx, section in enumerate(sections):
                if idx == 0:
                    intro[6] += section.xpath('./span/text()')[0]
                elif idx == 1:
                    intro[7] += section.xpath('./span/text()')[0]
                elif idx == 3:
                    text = section.xpath('text()')
                    intro[8] += text[0] if text else '未知'
                elif idx == 4:
                    text = section.xpath('text()')
                    intro[9] += text[0] if text else '未知'
                elif idx == 5:
                    text = section.xpath('text()')
                    intro[10] += text[0] if text else '未知'
            data.append(intro)
            self.__saveItem(data)
            # Random sub-second pause between detail pages to be polite to the server.
            time.sleep(random.random())
        return False
    def __saveItem(self, data):
        """Persist one succulent: thumbnail -> show.jpg, metadata -> info.pkl.

        Args:
            data: [name, thumbnail_url, intro_list] as built by next().
        """
        # makedirs creates the missing parent ('resources') as well; the original
        # os.mkdir raised FileNotFoundError when the parent did not exist.
        savepath = os.path.join(self.savedir, data[0])
        os.makedirs(savepath, exist_ok=True)
        # Context managers guarantee the files are closed even if a request fails.
        with open(os.path.join(savepath, 'show.jpg'), 'wb') as f:
            f.write(requests.get(data[1], headers=self.__randomHeaders()).content)
        with open(os.path.join(savepath, 'info.pkl'), 'wb') as f:
            pickle.dump(data, f)
    def __getAllPageUrls(self):
        """Read the total page count from page 1 and build every listing URL."""
        res = requests.get('http://www.mengsang.com/duorou/list_1_1.html', headers=self.__randomHeaders())
        res.encoding = 'gbk'
        html = etree.HTML(res.text)
        num_pages = html.xpath('//span[@class="pageinfo"]/strong')[0].text
        return ['http://www.mengsang.com/duorou/list_1_%s.html' % i for i in range(1, int(num_pages) + 1)]
    def __randomHeaders(self):
        """Build request headers with a random User-Agent and Referer."""
        return {'user-agent': random.choice(self.ua_list), 'referer': random.choice(self.referer_list)}
(3)系统界面设置。
class SucculentQuery(QWidget):
    """Main query window for the succulent data viewer (PyQt5 QWidget).

    NOTE(review): the slots wired up below (self.find, self.randomRead,
    self.update, self.showLabelImage) are defined elsewhere in the class —
    not visible in this excerpt.
    """
    def __init__(self, parent=None, **kwargs):
        super(SucculentQuery, self).__init__(parent)
        self.setWindowTitle('多肉植物——数据查询系统')
        self.setWindowIcon(QIcon('resources/icon/icon.png'))
        # --- widgets ---
        self.label_name = QLabel('多肉名称: ')
        self.line_edit = QLineEdit()
        self.button_find = QPushButton()
        self.button_find.setText('查询')
        self.label_result = QLabel('查询结果:')
        # Fixed-size image area, pre-filled with the app icon as a placeholder.
        self.show_label = QLabel()
        self.show_label.setFixedSize(300, 300)
        self.showLabelImage('resources/icon/icon.png')
        self.text_result = QTextEdit()
        self.button_random = QPushButton()
        self.button_random.setText('随机读取')
        self.button_update = QPushButton()
        self.button_update.setText('数据更新')
        # Status line showing whether a crawl is running and its progress.
        self.tip_label = QLabel()
        self.tip_label.setText('数据状态: 未在更新数据, 数据更新进度: 0/0')
        # --- layout (row, column, rowSpan, columnSpan) ---
        self.grid = QGridLayout()
        self.grid.addWidget(self.label_name, 0, 0, 1, 1)
        self.grid.addWidget(self.line_edit, 0, 1, 1, 30)
        self.grid.addWidget(self.button_find, 0, 31, 1, 1)
        self.grid.addWidget(self.button_random, 0, 32, 1, 1)
        self.grid.addWidget(self.button_update, 0, 33, 1, 1)
        self.grid.addWidget(self.tip_label, 1, 0, 1, 31)
        self.grid.addWidget(self.label_result, 2, 0)
        self.grid.addWidget(self.text_result, 3, 0, 1, 34)
        self.grid.addWidget(self.show_label, 3, 34, 1, 1)
        self.setLayout(self.grid)
        self.resize(600, 400)
        # --- signal bindings ---
        self.button_find.clicked.connect(self.find)
        self.button_random.clicked.connect(self.randomRead)
        # Run the crawler update in a background thread so the UI stays responsive.
        self.button_update.clicked.connect(lambda _: threading.Thread(target=self.update).start())
效果如下所示——
总结
想让多肉植物正常长大,一方面要保证根系是好的,另外一方面要少量施肥,不要干旱得太厉害。
尤其秋冬给肉肉控状态的时候,长期不浇水老叶子消耗比较多,叶片聚拢,新叶迟迟长不出,虽然颜色出来了,但肉肉会明显变小啦~啥时候我养的绿植成这样子那就真的成功了:
完整的免费源码领取处:
如需完整的项目源码+素材源码基地见:#私信小编06#或者点击蓝色文字添加即可获取免费的福利!
你们的支持是我最大的动力!!记得三连哦~mua 欢迎大家阅读往期的文章哦~
推荐往期文章:
Python—2021 |已有文章汇总 | 持续更新,直接看这篇就够了~