Commit c7aafbd0, authored by 贺阳

If a small package under the bill of lading already has a node push log for the corresponding node, do not push that node again; the preceding nodes must also be pushed.

Parent 4b2b1e33
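In outline, the rule this commit implements looks like the following minimal sketch (the helpers has_push_log and push_node are hypothetical names used only for illustration; the real flow queues the work through Redis, as the diff below shows):

    def push_matched_node(bl, matched_node, preceding_nodes):
        # Push every preceding node first, then the matched node itself.
        for node in preceding_nodes + [matched_node]:
            for package in bl.ship_package_ids:
                # Skip packages that already have a push log for this node.
                if has_push_log(package, node):
                    continue
                push_node(package, node)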
......@@ -3,6 +3,7 @@
import base64
import io
import json
import logging
import time
......@@ -55,7 +56,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
"""
Confirm operation
"""
# Measure the elapsed time of the whole operation
start_time = time.time()
_logger.info(f"开始执行批量获取POD信息操作")
bl_objs = self.get_order()
......@@ -88,10 +89,10 @@ class BatchGetPodInfoWizard(models.TransientModel):
# Then sync and write back
if self.sync_last_mile_pod and processed_files:
self._sync_last_mile_pod(processed_files)
# Sync and push the matched node
if self.sync_match_node and processed_files:
self._sync_match_node(processed_files)
self.get_date_sync_match_node(processed_files)
end_time = time.time()
_logger.info(f"批量获取POD信息操作完成,耗时: {end_time - start_time}秒")
......@@ -257,11 +258,11 @@ class BatchGetPodInfoWizard(models.TransientModel):
if file_data:
# Decode the base64 data to binary
pdf_binary = base64.b64decode(file_data)
# Extract the text first for the later node-sync step
if 'ocr_texts' not in file_info:
file_info['ocr_texts'] = self._extract_text_from_pdf_with_ocr(pdf_binary, bl.bl_no)
# Process the PDF with OCR
processed_pdf = self._process_pdf_with_ocr(
pdf_data=pdf_binary,
......@@ -290,30 +291,30 @@ class BatchGetPodInfoWizard(models.TransientModel):
import pytesseract
import numpy as np
from PIL import Image
# Try to import OpenCV; fall back to PIL if it is unavailable
try:
import cv2
cv2_available = True
except ImportError:
cv2_available = False
# Configure the Tesseract path
self._setup_tesseract_path()
# Open the PDF document
pdf_document = fitz.open(stream=pdf_binary, filetype="pdf")
page_texts = {}
# Iterate over every page and extract its text
for page_num in range(len(pdf_document)):
page = pdf_document[page_num]
# Render the page to an image
mat = fitz.Matrix(2.0, 2.0)  # Increase the rendering resolution
pix = page.get_pixmap(matrix=mat)
img_data = pix.tobytes("png")
# Convert to a PIL image
if cv2_available:
nparr = np.frombuffer(img_data, np.uint8)
......@@ -323,10 +324,10 @@ class BatchGetPodInfoWizard(models.TransientModel):
pil_img = Image.open(io.BytesIO(img_data))
if pil_img.mode != 'RGB':
pil_img = pil_img.convert('RGB')
# OCR configuration
config = '--psm 6 --oem 1 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,- -c preserve_interword_spaces=1'
# Run OCR recognition with Tesseract
try:
text_content = pytesseract.image_to_string(pil_img, config=config, lang='eng')
......@@ -334,7 +335,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
except Exception as e:
_logger.error(f"OCR识别失败,第 {page_num + 1} 页: {str(e)}")
page_texts[page_num] = ""
pdf_document.close()
return page_texts
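_setup_tesseract_path is called above but not shown in this diff. A minimal sketch of what such a helper typically does, assuming the binary path is stored in a config parameter (the key tesseract_cmd_path is an assumption, not part of this commit):

    def _setup_tesseract_path(self):
        import pytesseract
        # Point pytesseract at a non-default Tesseract binary if one is configured.
        # The ir.config_parameter key below is hypothetical.
        cmd = self.env['ir.config_parameter'].sudo().get_param('tesseract_cmd_path')
        if cmd:
            pytesseract.pytesseract.tesseract_cmd = cmd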
......@@ -807,11 +808,22 @@ class BatchGetPodInfoWizard(models.TransientModel):
_logger.error(f"保存PDF附件失败: {str(e)}")
raise ValidationError(_('Failed to save PDF attachment: %s') % str(e))
def _sync_match_node(self, processed_files):
def get_date_sync_match_node(self, processed_files):
"""
Sync the matched node based on the POD file, extracting times from the red boxes
:param processed_files: list of processed file dicts
"""
ship_packages, pod_node_id = self.get_detail_info(processed_files)
self._sync_match_node(ship_packages, pod_node_id)
def get_detail_info(self, processed_files):
"""
Get the matched node and operation time for each bill of lading
:param processed_files: list of processed file dicts
:return: per-BL package info with operation times, and the matched POD node id
"""
ship_packages = []
error_bl = []
# Find the matching customs-clearance node (the node flagged for POD node matching)
pod_node = self.env['cc.node'].search([
('is_pod_node', '=', True),
......@@ -824,37 +836,54 @@ class BatchGetPodInfoWizard(models.TransientModel):
if not pod_node:
_logger.info(f"未找到尾程POD节点匹配的节点,提单号: {bl.bl_no}")
continue
# Extract the red-box times from the PDF file
file_data = file_info.get('file_data')
if not file_data:
_logger.warning(f"提单 {bl.bl_no} 没有文件数据")
continue
try:
# Reuse the OCR text if it has already been recognized
ocr_texts = file_info.get('ocr_texts')
# Parse the PDF and extract the times
extracted_times = self._extract_time_from_pdf(file_data, bl.bl_no, ocr_texts=ocr_texts)
if extracted_times:
# Use the earliest time as the node operation time
earliest_time = min(extracted_times)
_logger.info(f"提取到最早时间: {earliest_time},将作为节点操作时间")
# 这里需要实现具体的节点操作逻辑
# 根据实际业务需求,可能需要更新某个字段或调用某个方法
# 例如:更新节点的操作时间或状态
# pod_node.operation_time = earliest_time
# 或者调用某个方法来记录节点操作
_logger.info(f"为提单 {bl.bl_no} 同步节点操作时间: {earliest_time}")
ship_packages.append({
'bl_id': bl.id,
'id': bl.ship_package_ids.ids,
'tally_time': str(earliest_time)
})
else:
_logger.info(f"未从POD文件中提取到时间信息,提单号: {bl.bl_no}")
_logger.warning(f"提单 {bl.bl_no} 没有提取到时间信息")
error_bl.append(bl)
except Exception as e:
_logger.error(f"同步匹配节点失败,提单号: {bl.bl_no}, 错误: {str(e)}")
_logger.error(f"获取提单对应的节点以及时间失败,提单号: {bl.bl_no}, 错误: {str(e)}")
error_bl.append(bl)
if error_bl:
_logger.warning(f"提单 {', '.join([bl.bl_no for bl in error_bl])} 没有提取到时间信息")
raise ValidationError(
_('%s bill of loading cannot get node operation time,please manually upload push tk') % (
', '.join([bl.bl_no for bl in error_bl]))) # xx提单号没有获取到节点操作时间,请手动上传推送提单到TK
return ship_packages, pod_node.id
def _sync_match_node(self, ship_packages, pod_node_id):
"""
Sync the matched node
:param ship_packages: per-BL package and node info
:param pod_node_id: id of the node matched to the last-mile POD node
"""
# If a package under this BL already has a push log for the node, do not push it again
_logger.info(f"Syncing matched node, packages: {ship_packages}, node: {pod_node_id}")
if ship_packages:
bl_objs = self.env['cc.bl'].sudo().search(
[('id', 'in', [ship_package.get('bl_id') for ship_package in ship_packages])])
redis_conn = self.env['common.common'].sudo().get_redis()
if redis_conn and redis_conn != 'no' and pod_node_id:
redis_conn.lpush('mail_push_package_list', json.dumps(
{'ids': bl_objs.ids, 'ship_packages': str(ship_packages), 'action_type': 'push_match_node',
'pod_node_id': pod_node_id}))
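Note that ship_packages is serialized with str() rather than nested as JSON, so the consumer has to parse the Python-literal string back out (e.g. with ast.literal_eval). An illustrative payload, with invented ids:

    import json
    payload = json.dumps({
        'ids': [101],
        'ship_packages': str([{'bl_id': 101, 'id': [5001, 5002], 'tally_time': '2025-10-20 16:02:00'}]),
        'action_type': 'push_match_node',
        'pod_node_id': 42,
    })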
def _extract_time_from_pdf(self, file_data, bl_no, ocr_texts=None):
"""
......@@ -867,24 +896,24 @@ class BatchGetPodInfoWizard(models.TransientModel):
"""
import re
from datetime import datetime
extracted_times = []
try:
# If no OCR text was provided, run OCR recognition
if ocr_texts is None:
pdf_binary = base64.b64decode(file_data)
ocr_texts = self._extract_text_from_pdf_with_ocr(pdf_binary, bl_no)
# Search the recognized text for times
for page_num, text_content in ocr_texts.items():
# Initialize the time list for this page
page_times = []
# Based on the document layout, prefer the time after "DATE/TIME OF RELEASE"
# Format examples: "21:47 20-OCT-2025", "08:35 17-OCT-2025", "13:52 17-OCT-2025"
# OCR may drop the spaces, e.g. "DATETIMEOFRELEASE160220-SEP-2025"
# First try to find the time after "DATE/TIME OF RELEASE" (the time in the red box)
# Two cases are matched:
# 1. With spaces: "DATE/TIME OF RELEASE 16:02 20-SEP-2025"
......@@ -903,7 +932,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
# OCR may turn ":" into several spaces, e.g. "DATETIMEOFRELEASE 163420-SEP-2025"
r'DATETIMEOFRELEASE\s+(\d)(\d)(\d{2})(\d{2})-([A-Z]{3})-(\d{4})',
]
for idx, pattern in enumerate(release_time_patterns):
release_time_match = re.search(pattern, text_content, re.IGNORECASE)
if release_time_match:
......@@ -913,18 +942,19 @@ class BatchGetPodInfoWizard(models.TransientModel):
# The spaced case: ('16:02', '20-SEP-2025')
time_part = release_time_match.group(1)
date_part = release_time_match.group(2)
# Normalize the month abbreviation to title case (required by Python's strptime)
month_map = {
'JAN': 'Jan', 'FEB': 'Feb', 'MAR': 'Mar', 'APR': 'Apr',
'MAY': 'May', 'JUN': 'Jun', 'JUL': 'Jul', 'AUG': 'Aug',
'SEP': 'Sep', 'OCT': 'Oct', 'NOV': 'Nov', 'DEC': 'Dec'
}
# If the month in the date part is uppercase, convert it to title case
for key, value in month_map.items():
    date_part = re.sub(r'-{}\b'.format(key), f'-{value}', date_part, flags=re.IGNORECASE)
match_str = f"{time_part} {date_part}"
time_obj = datetime.strptime(match_str, '%H:%M %d-%b-%Y')
elif group_count == 5:
......@@ -934,7 +964,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
day = release_time_match.group(3)
month = release_time_match.group(4)
year = release_time_match.group(5)
# Normalize the month abbreviation
month_map = {
'JAN': 'Jan', 'FEB': 'Feb', 'MAR': 'Mar', 'APR': 'Apr',
......@@ -942,10 +972,11 @@ class BatchGetPodInfoWizard(models.TransientModel):
'SEP': 'Sep', 'OCT': 'Oct', 'NOV': 'Nov', 'DEC': 'Dec'
}
month_normalized = month_map.get(month.upper(), month.capitalize())
# Build the datetime object manually to avoid strptime format issues
month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[month.upper()]
time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
match_str = f"{hour}:{minute} {day.zfill(2)}-{month_normalized}-{year}"
elif group_count == 6:
......@@ -956,27 +987,29 @@ class BatchGetPodInfoWizard(models.TransientModel):
day = release_time_match.group(4)
month = release_time_match.group(5)
year = release_time_match.group(6)
# Combine the hour and minute digits
hour = hour_tens + hour_ones
minute = minute_str
# Build the datetime object manually
month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[month.upper()]
time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
match_str = f"{hour}:{minute} {day.zfill(2)}-{month}-{year}"
else:
_logger.warning(f"意外的分组数量: {group_count}, 分组: {release_time_match.groups()}")
continue
page_times.append(time_obj)
break
except Exception as e:
_logger.warning(f"解析DATE/TIME OF RELEASE时间失败: {release_time_match.groups()}, 错误: {str(e)}")
_logger.warning(
f"解析DATE/TIME OF RELEASE时间失败: {release_time_match.groups()}, 错误: {str(e)}")
else:
continue
# Then look for other time formats
time_patterns = [
# RELEASE NOTE format: HH:MM DD-MON-YYYY
......@@ -986,7 +1019,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
r'\d{2}/\d{2}/\d{4}\s+\d{2}:\d{2}', # 31/12/2023 12:30
r'\d{4}/\d{2}/\d{2}\s+\d{2}:\d{2}:\d{2}', # 2023/12/31 12:30:00
]
for pattern in time_patterns:
matches = re.findall(pattern, text_content)
for match in matches:
......@@ -1015,22 +1048,22 @@ class BatchGetPodInfoWizard(models.TransientModel):
continue
else:
continue
except Exception as e:
_logger.warning(f"解析时间失败: {match}, 错误: {str(e)}")
continue
# If any times were extracted on this page, record the earliest one
if page_times:
earliest_page_time = min(page_times)
extracted_times.append(earliest_page_time)
# If any times were extracted, return the earliest one
if extracted_times:
earliest_time = min(extracted_times)
return [earliest_time]
except Exception as e:
_logger.error(f"提取PDF时间信息失败,提单号: {bl_no}, 错误: {str(e)}")
return extracted_times
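For reference, this is how the no-space OCR pattern above resolves into a datetime; the sample string is invented to mimic OCR output where ":" became spaces:

    import re
    from datetime import datetime

    text = "DATETIMEOFRELEASE 163420-SEP-2025"
    m = re.search(r'DATETIMEOFRELEASE\s+(\d)(\d)(\d{2})(\d{2})-([A-Z]{3})-(\d{4})', text)
    hour = m.group(1) + m.group(2)  # '1' + '6' -> '16'
    minute, day, month, year = m.group(3), m.group(4), m.group(5), m.group(6)
    month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
                 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[month]
    print(datetime(int(year), month_num, int(day), int(hour), int(minute)))
    # -> 2025-09-20 16:34:00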
......@@ -457,7 +457,7 @@ class CcBl(models.Model):
raise ValidationError(
_('The small package node or bill of lading node is not at a completed node, so the bill of lading cannot be marked as completed!'))
def done_func(self, is_email=False, **kwargs):
"""
Mark as completed: run the bill of lading patrol first, then change the BL state
"""
......@@ -475,7 +475,7 @@ class CcBl(models.Model):
content = self.get_patrol_email_content(result)
raise ValidationError(content)
if is_success or kwargs.get('exception_reason'):
super(CcBl, self).done_func(is_email=is_email, **kwargs)
def check_bl_patrol(self):
"""
......@@ -1226,7 +1226,7 @@ class CcBl(models.Model):
return False
def mail_auto_push(self, mail_time=False, tally_ship_packages=[], action_type='tally', mail_db_user='邮件接收',
pda_db_user='pda'):
pda_db_user='pda', pod_node_id=False):
self = self.with_context(dict(self._context, is_mail=True))
for item in self:
try:
......@@ -1249,127 +1249,137 @@ class CcBl(models.Model):
user_obj = self.env['res.users'].search([('login', '=', pda_db_user)], limit=1)
ship_package_ids = [ship_package_dict for sublist in [d['id'] for d in ship_packages] for
ship_package_dict in sublist]
tally_state = 'checked_goods' if action_type == 'tally' else (
'picked_up' if action_type == 'pickup' else 'handover_completed')
# Subsequent nodes
node_obj = self.env['cc.node'].sudo().search([
('node_type', '=', 'package'),
('tally_state', '=', tally_state)  # Tally or last-mile handover nodes, ascending by seq
], order='seq asc')
if node_obj:
all_ship_package_obj = self.env['cc.ship.package'].search(
[('id', 'in', ship_package_ids)])  # All small packages
# Prefetch all sync logs in a single batch query
all_sync_logs = self.env['cc.ship.package.sync.log'].sudo().search([
('package_id', 'in', ship_package_ids)
])
# Build a sync-log dict for fast lookup
sync_log_dict = {}
for log in all_sync_logs:
if log.package_id.id not in sync_log_dict:
sync_log_dict[log.package_id.id] = set()
sync_log_dict[log.package_id.id].add(log.process_code)
# Build a ship_packages dict for fast lookup
ship_packages_dict = {}
for package in ship_packages:
# If an id appears in several packages, use the tally_time of the last one
if package.get('tally_time'):
for single_id in package['id']:
ship_packages_dict[single_id] = package['tally_time']
# Preceding nodes: nodes not yet generated before tally or last-mile handover
before_node_obj = node_obj[0].get_before_node()
for before_node in before_node_obj:
before_minutes = before_node.calculate_total_interval(node_obj[0])
# Prepare the batch update data
update_data = []
for package in all_ship_package_obj:
package_id = package.id
if package_id not in sync_log_dict or before_node.tk_code not in sync_log_dict.get(
package_id, set()):
tally_time = ship_packages_dict.get(package_id)
if tally_time:
operation_time = (
datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') - timedelta(
minutes=before_minutes)) if tally_time else fields.Datetime.now() - timedelta(
minutes=before_minutes)
update_data.append((
package_id,
before_node.id,
operation_time,
before_node.desc,
True if before_node.is_default else False
))
if update_data:
# Build the batch update SQL
values_str = ','.join(
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in update_data)
sql = """
UPDATE cc_ship_package AS t SET
state = c.state,
process_time = c.process_time,
state_explain = c.state_explain,
is_sync = c.is_sync
FROM (VALUES
{}
) AS c(id, state, process_time, state_explain, is_sync)
WHERE t.id = c.id
""".format(values_str)
self.env.cr.execute(sql)
self._cr.commit()  # Commit the transaction
item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
user_obj=user_obj)
# Tally or last-mile handover nodes
# Prefetch all state nodes
all_state_nodes = self.env['cc.node'].sudo().search([
('node_type', '=', 'package')
])
state_node_dict = {node.name: node for node in all_state_nodes}
next_minutes = int(self.env['ir.config_parameter'].sudo().get_param('next_minutes', default=20))
for index, node in enumerate(node_obj):
update_data = []
for package in all_ship_package_obj:
if package.state.name in state_node_dict:
current_state_node = state_node_dict[package.state.name]
if current_state_node.seq < node.seq:
tally_time = ship_packages_dict.get(package.id)
node_obj = False
all_ship_package_obj = self.env['cc.ship.package'].search(
[('id', 'in', ship_package_ids)])  # All small packages
# Prefetch all sync logs in a single batch query
all_sync_logs = self.env['cc.ship.package.sync.log'].sudo().search([
('package_id', 'in', ship_package_ids)
])
# Build a sync-log dict for fast lookup
sync_log_dict = {}
for log in all_sync_logs:
if log.package_id.id not in sync_log_dict:
sync_log_dict[log.package_id.id] = set()
sync_log_dict[log.package_id.id].add(log.process_code)
# Build a ship_packages dict for fast lookup
ship_packages_dict = {}
for package in ship_packages:
# If an id appears in several packages, use the tally_time of the last one
if package.get('tally_time'):
for single_id in package['id']:
ship_packages_dict[single_id] = package['tally_time']
if action_type != 'push_match_node':
tally_state = 'checked_goods' if action_type == 'tally' else (
'picked_up' if action_type == 'pickup' else 'handover_completed')
# Subsequent nodes
node_obj = self.env['cc.node'].sudo().search([
('node_type', '=', 'package'),
('tally_state', '=', tally_state)  # Tally or last-mile handover nodes, ascending by seq
], order='seq asc')
if node_obj:
# Preceding nodes: nodes not yet generated before tally or last-mile handover
before_node_obj = node_obj[0].get_before_node()
for before_node in before_node_obj:
before_minutes = before_node.calculate_total_interval(node_obj[0])
# Prepare the batch update data
update_data = []
for package in all_ship_package_obj:
package_id = package.id
if package_id not in sync_log_dict or before_node.tk_code not in sync_log_dict.get(
package_id, set()):
tally_time = ship_packages_dict.get(package_id)
if tally_time:
operation_time = (
datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') + timedelta(
minutes=next_minutes * index)) if tally_time else fields.Datetime.now() + timedelta(
minutes=next_minutes * index)
datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') - timedelta(
minutes=before_minutes)) if tally_time else fields.Datetime.now() - timedelta(
minutes=before_minutes)
update_data.append((
package.id,
node.id,
package_id,
before_node.id,
operation_time,
node.desc,
True if node.is_default else False
before_node.desc,
True if before_node.is_default else False
))
if update_data:
# Build the batch update SQL
values_str = ','.join(
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in update_data)
sql = """
UPDATE cc_ship_package AS t SET
state = c.state,
process_time = c.process_time,
state_explain = c.state_explain,
is_sync = c.is_sync
FROM (VALUES
{}
) AS c(id, state, process_time, state_explain, is_sync)
WHERE t.id = c.id
""".format(values_str)
self.env.cr.execute(sql)
self._cr.commit()  # Commit the transaction
item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
user_obj=user_obj)
if update_data:
# Build the batch update SQL
values_str = ','.join(
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in
update_data)
sql = """
UPDATE cc_ship_package AS t SET
state = c.state,
process_time = c.process_time,
state_explain = c.state_explain,
is_sync = c.is_sync
FROM (VALUES
{}
) AS c(id, state, process_time, state_explain, is_sync)
WHERE t.id = c.id
""".format(values_str)
self.env.cr.execute(sql)
self._cr.commit()  # Commit the transaction
item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
user_obj=user_obj)
# Tally or last-mile handover nodes
# Prefetch all state nodes
if action_type == 'push_match_node' and pod_node_id:
# The last-mile matched node includes its preceding nodes
match_node_obj = self.env['cc.node'].sudo().search([('id', '=', pod_node_id)])
if match_node_obj:
node_obj = self.env['cc.node'].sudo().search(
    [('node_type', '=', 'package'), ('seq', '<=', match_node_obj.seq), ('is_must', '=', True)],
    order='seq asc')
logging.info('node_obj: %s', node_obj)
if not node_obj:
return False
all_state_nodes = self.env['cc.node'].sudo().search([
('node_type', '=', 'package')
])
state_node_dict = {node.name: node for node in all_state_nodes}
next_minutes = int(self.env['ir.config_parameter'].sudo().get_param('next_minutes', default=20))
for index, node in enumerate(node_obj):
update_data = []
for package in all_ship_package_obj:
if package.state.name in state_node_dict:
current_state_node = state_node_dict[package.state.name]
if current_state_node.seq < node.seq:
tally_time = ship_packages_dict.get(package.id)
if tally_time:
operation_time = (
datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') + timedelta(
minutes=next_minutes * index)) if tally_time else fields.Datetime.now() + timedelta(
minutes=next_minutes * index)
update_data.append((
package.id,
node.id,
operation_time,
node.desc,
True if node.is_default else False
))
if update_data:
# Build the batch update SQL
values_str = ','.join(
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in update_data)
sql = """
UPDATE cc_ship_package AS t SET
state = c.state,
process_time = c.process_time,
state_explain = c.state_explain,
is_sync = c.is_sync
FROM (VALUES
{}
) AS c(id, state, process_time, state_explain, is_sync)
WHERE t.id = c.id
""".format(values_str)
self.env.cr.execute(sql)
self._cr.commit()  # Commit the transaction
item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
user_obj=user_obj)
except Exception as err:
logging.error('fetch_mail_dlv--error:%s' % str(err))
......
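The batch writes above use PostgreSQL's UPDATE ... FROM (VALUES ...) form, with psycopg2's cursor.mogrify escaping each row into a SQL literal. A standalone sketch of the same technique, with an invented table and connection for illustration:

    import psycopg2

    conn = psycopg2.connect(dbname='demo')  # hypothetical connection
    cur = conn.cursor()
    rows = [(1, 'done', '2025-10-20 16:02:00'), (2, 'done', '2025-10-20 16:22:00')]
    # mogrify renders each tuple as a safely escaped SQL literal.
    values = ','.join(cur.mogrify('(%s,%s,%s)', r).decode('utf-8') for r in rows)
    cur.execute("""
        UPDATE demo_package AS t SET
            state = c.state,
            process_time = c.process_time::timestamp
        FROM (VALUES {}) AS c(id, state, process_time)
        WHERE t.id = c.id
    """.format(values))
    conn.commit()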
......@@ -42,10 +42,11 @@ class Order_dispose(object):
bl_record = bl_obj.browse(bl_ids)
else:
bl_record = bl_obj.browse(data['id'])
# utc_time = datetime.strptime(data['utc_time'], "%Y-%m-%d %H:%M:%S")
utc_time = data.get('utc_time')
user_login = data.get('user_login')
bl_record.mail_auto_push(utc_time, ship_packages, action_type, user_login, config.pda_db_user)
pod_node_id = data.get('pod_node_id')
bl_record.mail_auto_push(utc_time, ship_packages, action_type, user_login, config.pda_db_user,
pod_node_id=pod_node_id)
except Exception as ex:
logging.error('mail_auto_push error:%s' % str(ex))
return res_data
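The producer side shown earlier lpush-es a JSON payload onto mail_push_package_list; the worker loop below presumably pops and dispatches it. A minimal sketch of that consume-and-dispatch shape, assuming a blocking pop (the exact pop call is not visible in this diff):

    import json
    import redis

    r = redis.Redis()
    while 1:
        # brpop blocks until an item arrives and returns (queue_name, payload).
        _, raw = r.brpop('mail_push_package_list')
        data = json.loads(raw)
        if data.get('action_type') == 'push_match_node':
            # pod_node_id travels with the payload so mail_auto_push can resolve
            # the matched node and its preceding nodes.
            print(data['ids'], data.get('pod_node_id'))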
......@@ -54,7 +55,7 @@ class Order_dispose(object):
try:
pool = redis.ConnectionPool(**config.redis_options)
r = redis.Redis(connection_pool=pool)
logging.info(u'redis connection success')
Order_dispose = Order_dispose()
while 1:
try:
......@@ -65,4 +66,4 @@ try:
logging.error(e)
continue
except Exception as e:
logging.error("登录失败:%s" % e)
logging.error("login failed:%s" % e)