The album is organized as local folders. To maintain it, a JSON file describing the images in each folder has to be generated automatically, and the images have to be compressed and uploaded automatically. This post shares my Python program for doing that, as one possible solution for your reference.
Prerequisites
- qshell set up for uploading to the Qiniu Cloud image bed (see the earlier post: Next -22- 添加相册系列 -2- 使用七牛云qshell同步图像目录)
- A working Python environment (the scripts below use exifread, opencv-python / cv2 and requests)
- The source image folders, prepared in advance
Goals
- Automatically read every image in the source folders
- Compress each image to a fixed width
- Copy the results into a designated temporary folder
- Upload them to Qiniu Cloud automatically
- Automatically extract the image metadata and save it as JSON
- Merge all of the album information into a single JSON file
Project code
Configuration file (config.py)
```python
# config.py
import platform


class Parameters:
    Image_Root_Path = r'/media/vvd/新加卷/pic'                         # root of the source raw images
    Hexo_Root_Path = r'/home/vvd/VVD_Work/Hexo/VVD_Hexo'               # root of the Hexo dir
    Hexo_Sub_Dir_to_Photos = r'source/photos'                          # my source dir for the album
    Uploading_Temp_Image_Path = r'/home/vvd/VVD_Work/Hexo/Album_Temp'  # root of the temp image dir

    Album_Ddescription_File_Name = 'readme.json'    # name of the json file in each image dir
    Log_File_Name = 'Album_Log.log'                 # name of the log file
    Album_Total_Json = 'album.json'                 # json info of the whole album
    Album_Md_File_Name = 'index.md'                 # name of the markdown file for hexo
    Qshell_Config_File_Name = 'upload.conf'         # qiniu qshell config file name

    Logging_Object = None                           # handle to the logging module, set in log_init()
    Image_Info_Get_Object = None                    # instance of PhotoExifInfo, set in environment_check()

    Image_Extension_List = ['jpg', 'png', 'jpeg', 'bmp']  # list of common image extensions
    Image_Resize_Width = 2000                       # image width after resizing
    Whether_Overwrite_Old_Temp_File = False         # whether to overwrite old files when the program runs again
    Image_Url_Prefix = r'https://photos.zywvvd.com/vvd_hexo/'  # prefix of the image urls
    Force_Upload = False                            # upload images no matter what happens

    Current_System = platform.system()              # current operating system
    if Current_System == 'Linux':
        Split_Char = '/'                            # path separator of the current operating system
    elif Current_System == 'Windows':
        Split_Char = '\\'
```
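A small side note on the Split_Char branch above: Python already exposes the platform-specific separator as os.sep, so the manual platform check could arguably be dropped. A minimal sketch of that alternative:

```python
import os

# os.sep is '/' on Linux and '\\' on Windows, so it can stand in for Parameters.Split_Char
Split_Char = os.sep
```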
Image information extraction (getinfo.py)
```python
# getinfo.py
import os

import exifread
import requests


class PhotoExifInfo():
    def __init__(self):
        if os.path.exists('vvdkey.py'):
            from vvdkey import baidu_key   # use your own Baidu Map key
            self.baidu_map_ak = baidu_key
        else:
            ## set your baidu map key here
            self.baidu_map_ak = ""
        self.interested_keys = {
            'Image Model': 'Image_Model',                    # camera model
            'EXIF FNumber': 'EXIF_FNumber',                  # aperture, stored as a fraction
            'EXIF FocalLength': 'EXIF_FocalLength',          # focal length
            'EXIF ExposureMode': 'EXIF_ExposureMode',        # exposure mode
            'EXIF ExposureTime': 'EXIF_ExposureTime',        # exposure time in seconds
            'EXIF ISOSpeedRatings': 'EXIF_ISOSpeedRatings',  # ISO
            'Image DateTime': 'Image_DateTime'               # date the photo was taken
        }

    def get_image_info(self, image_path):
        """
        Extract the EXIF information of one photo.
        """
        image_info_dict = {}
        with open(image_path, 'rb') as fp:
            tags = exifread.process_file(fp)

        for j in tags:
            print(f"{j} : {tags[j]}")

        for item in tuple(self.interested_keys):
            try:
                info = tags[item].printable
                if item == 'EXIF FNumber':
                    # the aperture may come as a fraction such as 28/10
                    if '/' in info:
                        A, B = info.split('/')
                        info = 'f' + format(int(A) / int(B), '.1f')
                    else:
                        info = 'f' + info
                if item == 'EXIF FocalLength':
                    info = info + 'mm'
                if item == 'EXIF ExposureTime':
                    info = info + 's'
                image_info_dict[self.interested_keys[item]] = info
            except Exception:
                print(f'{image_path} has no attribute of {item}')
                continue

        try:
            location = self._get_city_info(tags)
            if location != "":
                image_info_dict['positon'] = location
        except Exception:
            print(f'{image_path} has no GPS info')
        return image_info_dict

    def _get_lng_lat(self, tags):
        """
        Convert the GPS tags to decimal latitude and longitude.
        """
        try:
            # latitude
            LatRef = tags["GPS GPSLatitudeRef"].printable
            Lat = tags["GPS GPSLatitude"].printable[1:-1].replace(" ", "").replace("/", ",").split(",")
            Lat = float(Lat[0]) + float(Lat[1]) / 60 + float(Lat[2]) / 3600
            if LatRef != "N":
                Lat = Lat * (-1)
            # longitude
            LonRef = tags["GPS GPSLongitudeRef"].printable
            Lon = tags["GPS GPSLongitude"].printable[1:-1].replace(" ", "").replace("/", ",").split(",")
            Lon = float(Lon[0]) + float(Lon[1]) / 60 + float(Lon[2]) / 3600
            if LonRef != "E":
                Lon = Lon * (-1)
            return Lat, Lon
        except Exception:
            print('Unable to read the GPS coordinates')

    def _get_city_info(self, tags):
        """
        Reverse-geocode the coordinates into an address via the Baidu Map API.
        """
        result = self._get_lng_lat(tags)
        if result:
            Lat, Lon = result
            url = "https://api.map.baidu.com/reverse_geocoding/v3/?ak=" + self.baidu_map_ak \
                  + "&output=json&coordtype=wgs84ll&location=" + str(Lat) + ',' + str(Lon)
            response = requests.get(url).json()
            status = response['status']
            if status == 0:
                address = response['result']['formatted_address']
                if address != "":
                    return address
            else:
                print('baidu_map error')
        return ""
```
Main utility functions (funs.py)
```python
# funs.py
import config
import json
import os
import cv2
import shutil
import logging
import time

from getinfo import PhotoExifInfo


def log_init():
    """
    Initialize logging and save the logging handle in `config.Parameters.Logging_Object`.
    After this operation we can write logs with simple calls such as
    `logging.debug('test debug')` and `logging.info('test info')`.
    Logging levels: debug < info < warning < error < critical.
    """
    log_file_path = os.path.join(config.Parameters.Uploading_Temp_Image_Path, config.Parameters.Log_File_Name)
    if os.path.exists(log_file_path):
        # open the log file in append mode
        open_type = 'a'
    else:
        # open the log file in write mode
        open_type = 'w'
    logging.basicConfig(
        # log level, e.g. logging.DEBUG, logging.ERROR
        level=logging.INFO,
        # log format: time, file name, line number, level name, message
        # format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        format='%(asctime)s : %(message)s',
        # timestamp format
        datefmt='%Y-%m-%d %H:%M:%S',
        # directory (must exist) and file name of the log file
        filename=log_file_path,
        # mode used to open the log file
        filemode=open_type
    )
    config.Parameters.Logging_Object = logging


def log(message):
    """
    Print a message to the console and then save it to the log.
    """
    print(message)
    if config.Parameters.Logging_Object:
        config.Parameters.Logging_Object.info(message)
    else:
        raise TypeError('there is something wrong with the logging object in config')


def dir_check(dir_path):
    """
    Make sure `dir_path` exists as a directory, creating it if necessary.
    """
    if not os.path.isdir(dir_path):
        try:
            os.makedirs(dir_path)
        except Exception as err:
            print(f'failed to make dir {dir_path}, error {err}')
        else:
            log(f'{dir_path} did not exist and has been created')


def environment_check():
    """
    Check the album environment and try to fix things if something is wrong.
    """
    ## check the important dirs
    assert os.path.isdir(config.Parameters.Image_Root_Path), f"image root dir {config.Parameters.Image_Root_Path} does not exist"
    assert os.path.isdir(config.Parameters.Hexo_Root_Path), f"hexo root dir {config.Parameters.Hexo_Root_Path} does not exist"
    dir_check(config.Parameters.Uploading_Temp_Image_Path)
    ## check the image width, it should not be smaller than 200
    config.Parameters.Image_Resize_Width = max(int(config.Parameters.Image_Resize_Width), 200)
    ## initialize logging
    log_init()
    ## create the image-info extractor
    config.Parameters.Image_Info_Get_Object = PhotoExifInfo()


def init_json_create(json_path):
    """
    Write an initial (template) json file to `json_path`.
    """
    assert os.path.basename(json_path).split('.')[-1] == 'json', f'{json_path} should be a path to a json file'
    assert os.path.isdir(os.path.split(json_path)[0]), 'the folder does not exist'
    now = time.localtime()
    info_dict = {}
    ## time
    my_time = {
        'year': now.tm_year,
        'month': now.tm_mon,
        'day': now.tm_mday
    }
    info_dict['time'] = my_time
    ## type
    info_dict['type'] = 'album type'
    ## model
    info_dict['model'] = ''
    ## position
    position = {
        'city': 'beijing',
        'street': 'Tiananmen'
    }
    info_dict['position'] = position
    ## title
    info_dict['title'] = 'Album title'
    ## balabala
    info_dict['balabala'] = 'description of album'
    with open(json_path, mode='w') as fp:
        json.dump(info_dict, fp, indent=3)
    log(f'make initial json file {json_path}')


def image_compression_and_save(src_dir, tar_dir):
    """
    Compress the images in `src_dir` and save the results to `tar_dir`.
    """
    sub_image_dirs = os.listdir(src_dir)
    for sub_image_dir in sub_image_dirs:
        src_sub_image_path = os.path.join(src_dir, sub_image_dir)
        tar_sub_image_path = os.path.join(tar_dir, sub_image_dir)
        ## check the target dir
        dir_check(tar_sub_image_path)
        file_list = os.listdir(src_sub_image_path)
        for item in file_list:
            extension = item.split('.')[-1]
            if extension.lower() in config.Parameters.Image_Extension_List:
                ## this is an image file
                image_path = os.path.join(src_sub_image_path, item)
                tar_image_path = os.path.join(tar_sub_image_path, item)
                if not os.path.exists(tar_image_path) or config.Parameters.Whether_Overwrite_Old_Temp_File:
                    ## new file, or overwriting is allowed
                    image = cv2.imread(image_path)
                    image_shape = image.shape
                    if image_shape[1] >= config.Parameters.Image_Resize_Width:
                        ## the image needs compression
                        new_width = int(config.Parameters.Image_Resize_Width)
                        new_height = int(config.Parameters.Image_Resize_Width / image_shape[1] * image_shape[0])
                        new_image = cv2.resize(image, (new_width, new_height))
                        cv2.imwrite(tar_image_path, new_image)
                        log(f'image {os.path.join(sub_image_dir, item)} has been resized to {(new_width, new_height)} and moved to the temp dir')
                    else:
                        ## small image, copy it directly
                        shutil.copy(image_path, tar_image_path)
                        log(f'image {os.path.join(sub_image_dir, item)} has been moved to the temp dir')


def deal_with_sub_json(src_dir, tar_dir):
    """
    Build the json file of every album sub-directory.
    """
    sub_image_dirs = os.listdir(src_dir)
    for sub_image_dir in sub_image_dirs:
        src_sub_image_path = os.path.join(src_dir, sub_image_dir)
        tar_sub_image_path = os.path.join(tar_dir, sub_image_dir)
        ## check the target dir
        dir_check(tar_sub_image_path)
        src_info_json_file_path = os.path.join(src_sub_image_path, config.Parameters.Album_Ddescription_File_Name)
        if not os.path.exists(src_info_json_file_path):
            init_json_create(src_info_json_file_path)
        else:
            print(f'{src_info_json_file_path} found')
        with open(src_info_json_file_path) as fp:
            album_info = json.load(fp)
        file_list = os.listdir(src_sub_image_path)
        image_info_list = []
        for item in file_list:
            extension = item.split('.')[-1]
            if extension.lower() in config.Parameters.Image_Extension_List:
                ## this is an image file
                image_path = os.path.join(src_sub_image_path, item)
                assert config.Parameters.Image_Info_Get_Object is not None, "Image_Info_Get_Object should be an instance of PhotoExifInfo"
                image_info_dict = config.Parameters.Image_Info_Get_Object.get_image_info(image_path)
                image_info_dict['url'] = config.Parameters.Image_Url_Prefix + sub_image_dir + '/' + item
                image_info_list.append(image_info_dict)
        album_info['image_info'] = image_info_list
        album_info['directory'] = sub_image_dir
        tar_info_json_file_path = os.path.join(tar_sub_image_path, config.Parameters.Album_Ddescription_File_Name)
        with open(tar_info_json_file_path, 'w', encoding='utf-8') as fp:
            json.dump(album_info, fp, indent=3, ensure_ascii=False)
        log(f'json file of image dir {sub_image_dir} created')


def json_integrate(temp_root):
    '''
    Integrate the per-directory json files into a single file.
    '''
    sub_image_dirs = os.listdir(temp_root)
    album_dict = {}
    album_json_list = []
    for sub_image_dir in sub_image_dirs:
        if os.path.isdir(os.path.join(temp_root, sub_image_dir)):
            json_file_path = os.path.join(temp_root, sub_image_dir, config.Parameters.Album_Ddescription_File_Name)
            assert os.path.exists(json_file_path), f'file {json_file_path} not found'
            with open(json_file_path) as fp:
                json_dict = json.load(fp)
            album_json_list.append(json_dict)
    album_dict['album'] = album_json_list
    integrate_json_path = os.path.join(temp_root, config.Parameters.Album_Total_Json)
    with open(integrate_json_path, mode='w', encoding='utf-8') as fp:
        json.dump(album_dict, fp, ensure_ascii=False, indent=3)
    log(f'total json {integrate_json_path} saved')
    return album_dict


def make_dir_json_md_of_album(temp_root, hexo_photos_path, album_dict):
    '''
    Copy the json files and write the markdown files into the hexo source/photos dir.
    '''
    assert os.path.isdir(hexo_photos_path), f'hexo photos path {hexo_photos_path} does not exist'
    src_album_json_path = os.path.join(temp_root, config.Parameters.Album_Total_Json)
    assert os.path.exists(src_album_json_path), f'album json file {src_album_json_path} not found'
    tar_album_json_path = os.path.join(hexo_photos_path, config.Parameters.Album_Total_Json)
    shutil.copy(src_album_json_path, tar_album_json_path)
    log(f"json file {config.Parameters.Album_Total_Json} moved to the hexo dir")
    for sub_image_dict in album_dict['album']:
        src_json_path = os.path.join(temp_root, sub_image_dict['directory'], config.Parameters.Album_Ddescription_File_Name)
        assert os.path.exists(src_json_path), f'sub json file {src_json_path} not found'
        tar_json_path = os.path.join(hexo_photos_path, sub_image_dict['directory'], config.Parameters.Album_Ddescription_File_Name)
        image_dir = os.path.join(hexo_photos_path, sub_image_dict['directory'])
        dir_check(image_dir)
        shutil.copyfile(src_json_path, tar_json_path)
        log(f"json file {sub_image_dict['directory'] + '_' + config.Parameters.Album_Ddescription_File_Name} moved to the hexo dir")
        with open(os.path.join(image_dir, config.Parameters.Album_Md_File_Name), 'w', encoding='utf-8') as fp:
            fp.write('---' + '\n')
            fp.write('title: ' + sub_image_dict['title'] + '\n')
            fp.write('date: ' + str(sub_image_dict['time']['year']) + '-' + str(sub_image_dict['time']['month']) + '-' + str(sub_image_dict['time']['day']) + '\n')
            fp.write('type: photography' + '\n')
            fp.write('album: ' + sub_image_dict['directory'] + '\n')
            fp.write('---' + '\n')
        log(f'create md file of {os.path.join(image_dir, config.Parameters.Album_Md_File_Name)}')
```
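For reference, the front matter that make_dir_json_md_of_album writes into each index.md ends up looking like this (values taken from the sample album shown further below):

```
---
title: USTC
date: 2015-11-28
type: photography
album: 2015_11_28_hefei_ustc_fengjing
---
```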
Main script (main.py)
```python
# main.py
import os
import config

from funs import environment_check
from funs import deal_with_sub_json
from funs import json_integrate
from funs import image_compression_and_save
from funs import make_dir_json_md_of_album


if __name__ == '__main__':
    src_image_root = config.Parameters.Image_Root_Path
    temp_image_root = config.Parameters.Uploading_Temp_Image_Path
    hexo_photos_path = os.path.join(config.Parameters.Hexo_Root_Path, config.Parameters.Hexo_Sub_Dir_to_Photos)

    ## check the config environment
    environment_check()
    ## compress and save the images
    image_compression_and_save(src_image_root, temp_image_root)
    ## get the info of the images and save a json file for each dir
    deal_with_sub_json(src_image_root, temp_image_root)
    ## integrate the json files
    album_dict = json_integrate(temp_image_root)
    ## make the folders and markdown files in the photos dir
    make_dir_json_md_of_album(temp_image_root, hexo_photos_path, album_dict)

    ## upload the images with qshell
    if config.Parameters.Force_Upload:
        # remove qshell's local upload record so that every file is uploaded again
        os.system("rm -rf ~/.qshell/qupload")
    cmd = "qshell qupload " + os.path.join(config.Parameters.Uploading_Temp_Image_Path, config.Parameters.Qshell_Config_File_Name)
    os.system(cmd)
```
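The last step assumes an upload.conf already lives in the temp directory (setting up qshell was covered in the post linked in the prerequisites). For orientation only, a minimal qupload config might look roughly like the following; the bucket name is a placeholder, and the exact set of keys should be checked against the qshell documentation:

```json
{
    "src_dir": "/home/vvd/VVD_Work/Hexo/Album_Temp",
    "bucket": "your-bucket-name",
    "key_prefix": "vvd_hexo/",
    "overwrite": true
}
```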
The main functions are commented; I hope they give you some ideas. If you would like to use this code, it can be downloaded from hexo_album_process.
Example run
- The initial album folder pic
- Inside one album folder
- Each folder needs a readme.json recording the folder's information, which the program reads automatically. An example:
```json
{
    "time": {
        "year": 2015,
        "month": 11,
        "day": 28
    },
    "type": "风光",
    "model": "",
    "position": {
        "city": "合肥",
        "street": "ustc"
    },
    "title": "USTC",
    "balabala": "关于母校。"
}
```
- Configure config.py
```python
Image_Root_Path = r'/media/vvd/新加卷/pic'                          # root of the source raw images
Hexo_Root_Path = r'/home/vvd/VVD_Work/Hexo/VVD_Hexo'                # root of the Hexo dir
Hexo_Sub_Dir_to_Photos = r'source/photos'                           # my source dir for the album
Uploading_Temp_Image_Path = r'/home/vvd/VVD_Work/Hexo/Album_Temp'   # root of the temp image dir
Image_Url_Prefix = r'https://photos.zywvvd.com/vvd_hexo/'           # prefix of the image urls
```
- Run main.py
- The temporary folder now contains the compressed image folders and their json files
- The program writes the album directory hierarchy and the corresponding json files into hexo -> source -> photos; the resulting layout is sketched below
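Concretely, the layout written under source/photos looks like this (using the sample album directory from below):

```
source/photos
├── album.json
└── 2015_11_28_hefei_ustc_fengjing
    ├── index.md
    └── readme.json
```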

- The json file inside each folder now includes the per-image information
```json
{
    "time": {
        "year": 2015,
        "month": 11,
        "day": 28
    },
    "type": "风光",
    "model": "",
    "position": {
        "city": "合肥",
        "street": "ustc"
    },
    "title": "USTC",
    "balabala": "关于母校。",
    "image_info": [
        {
            "Image_Model": "NIKON D610",
            "EXIF_FNumber": "f11",
            "EXIF_FocalLength": "35mm",
            "EXIF_ExposureMode": "Auto Exposure",
            "EXIF_ExposureTime": "6s",
            "EXIF_ISOSpeedRatings": "100",
            "Image_DateTime": "2015:11:28 00:07:05",
            "url": "https://photos.zywvvd.com/vvd_hexo/2015_11_28_hefei_ustc_fengjing/1.jpg"
        },
        {
            "Image_Model": "NIKON D610",
            "EXIF_FNumber": "f4",
            "EXIF_FocalLength": "34mm",
            "EXIF_ExposureMode": "Manual Exposure",
            "EXIF_ExposureTime": "25s",
            "Image_DateTime": "2015:11:28 22:13:29",
            "url": "https://photos.zywvvd.com/vvd_hexo/2015_11_28_hefei_ustc_fengjing/2.jpg"
        }
    ],
    "directory": "2015_11_28_hefei_ustc_fengjing"
}
```
- The images are then uploaded to Qiniu Cloud automatically
Summary
There is nothing particularly advanced in this article; it mainly offers an approach to automatically maintaining an album for a static site, together with the code for generating the json files and uploading the images. If you have a better implementation, I would be glad to hear about it.
References
This article was inspired by asdfv1929.github.io/2018/05/26/…, thanks to the author for sharing.