Commit c7aafbd0 authored by 贺阳

If the corresponding packages under a bill of lading already have node push logs, do not push them again; the preceding nodes must also be pushed

Parent 4b2b1e33
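In outline, the commit makes the package-node push idempotent and back-fills the chain: a node is pushed for a package only if the package has no sync log for that node's process code yet, and the required preceding nodes (up to the matched POD node) are generated as well. A minimal sketch of the dedupe rule, using the names from the diff below (`should_push` itself is illustrative, not part of the commit):

    # sync_log_dict maps package id -> set of process codes already pushed,
    # built from cc.ship.package.sync.log in the diff below.
    def should_push(package_id, node, sync_log_dict):
        # Skip the push when the package already has a push log for this node.
        return node.tk_code not in sync_log_dict.get(package_id, set())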
@@ -3,6 +3,7 @@
import base64
import io
import json
import logging
import time
@@ -55,7 +56,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
        """
        Confirm operation
        """
        # Time the whole operation
        start_time = time.time()
        _logger.info("Starting batch POD info retrieval")
        bl_objs = self.get_order()
@@ -88,10 +89,10 @@ class BatchGetPodInfoWizard(models.TransientModel):
        # Then sync and write back
        if self.sync_last_mile_pod and processed_files:
            self._sync_last_mile_pod(processed_files)
        # Sync-push the matched nodes
        if self.sync_match_node and processed_files:
            self.get_date_sync_match_node(processed_files)
        end_time = time.time()
        _logger.info(f"Batch POD info retrieval finished, elapsed: {end_time - start_time}s")
@@ -257,11 +258,11 @@ class BatchGetPodInfoWizard(models.TransientModel):
                if file_data:
                    # Convert the base64 data to binary
                    pdf_binary = base64.b64decode(file_data)

                    # Extract the text first for the later node-sync step
                    if 'ocr_texts' not in file_info:
                        file_info['ocr_texts'] = self._extract_text_from_pdf_with_ocr(pdf_binary, bl.bl_no)

                    # Process the PDF with OCR
                    processed_pdf = self._process_pdf_with_ocr(
                        pdf_data=pdf_binary,
@@ -290,30 +291,30 @@ class BatchGetPodInfoWizard(models.TransientModel):
        import pytesseract
        import numpy as np
        from PIL import Image

        # Try to import OpenCV; fall back to PIL if it is unavailable
        try:
            import cv2
            cv2_available = True
        except ImportError:
            cv2_available = False

        # Set the Tesseract path
        self._setup_tesseract_path()

        # Open the PDF document
        pdf_document = fitz.open(stream=pdf_binary, filetype="pdf")
        page_texts = {}

        # Walk every page and extract its text
        for page_num in range(len(pdf_document)):
            page = pdf_document[page_num]

            # Render the page to an image
            mat = fitz.Matrix(2.0, 2.0)  # raise the resolution
            pix = page.get_pixmap(matrix=mat)
            img_data = pix.tobytes("png")

            # Convert to a PIL image
            if cv2_available:
                nparr = np.frombuffer(img_data, np.uint8)
@@ -323,10 +324,10 @@ class BatchGetPodInfoWizard(models.TransientModel):
                pil_img = Image.open(io.BytesIO(img_data))
                if pil_img.mode != 'RGB':
                    pil_img = pil_img.convert('RGB')

            # OCR configuration
            config = '--psm 6 --oem 1 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,- -c preserve_interword_spaces=1'

            # Run OCR with Tesseract
            try:
                text_content = pytesseract.image_to_string(pil_img, config=config, lang='eng')
@@ -334,7 +335,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
            except Exception as e:
                _logger.error(f"OCR failed on page {page_num + 1}: {str(e)}")
                page_texts[page_num] = ""

        pdf_document.close()
        return page_texts
@@ -807,11 +808,22 @@ class BatchGetPodInfoWizard(models.TransientModel):
            _logger.error(f"Failed to save PDF attachment: {str(e)}")
            raise ValidationError(_('Failed to save PDF attachment: %s') % str(e))

    def get_date_sync_match_node(self, processed_files):
        """
        Sync the matched node based on the POD files; the time is extracted from the red boxes
        :param processed_files: list of processed files
        """
        ship_packages, pod_node_id = self.get_detail_info(processed_files)
        self._sync_match_node(ship_packages, pod_node_id)

    def get_detail_info(self, processed_files):
        """
        Get the matched node and operation time for each bill of lading
        :param processed_files: list of processed files
        :return: the packages per bill of lading and the node operation times
        """
        ship_packages = []
        error_bl = []
        # Find the matching clearance node (the node flagged for POD node matching)
        pod_node = self.env['cc.node'].search([
            ('is_pod_node', '=', True),
@@ -824,37 +836,54 @@ class BatchGetPodInfoWizard(models.TransientModel):
            if not pod_node:
                _logger.info(f"No node matching the last-mile POD node found, BL no.: {bl.bl_no}")
                continue

            # Extract the red-box time from the PDF file
            file_data = file_info.get('file_data')
            if not file_data:
                _logger.warning(f"BL {bl.bl_no} has no file data")
                continue

            try:
                # Reuse the OCR text if it has already been recognized
                ocr_texts = file_info.get('ocr_texts')

                # Parse the PDF and extract the times
                extracted_times = self._extract_time_from_pdf(file_data, bl.bl_no, ocr_texts=ocr_texts)

                if extracted_times:
                    # Take the earliest time as the node operation time
                    earliest_time = min(extracted_times)
                    _logger.info(f"Earliest extracted time: {earliest_time}, used as the node operation time")
                    ship_packages.append({
                        'bl_id': bl.id,
                        'id': bl.ship_package_ids.ids,
                        'tally_time': str(earliest_time)
                    })
                else:
                    _logger.warning(f"No time information extracted for BL {bl.bl_no}")
                    error_bl.append(bl)
            except Exception as e:
                _logger.error(f"Failed to get the node and time for BL {bl.bl_no}, error: {str(e)}")
                error_bl.append(bl)
        if error_bl:
            _logger.warning(f"No time information extracted for BL(s) {', '.join([bl.bl_no for bl in error_bl])}")
            raise ValidationError(
                _('%s bill of lading cannot get the node operation time, please manually upload and push the BL to TK') % (
                    ', '.join([bl.bl_no for bl in error_bl])))
        return ship_packages, pod_node.id
    def _sync_match_node(self, ship_packages, pod_node_id):
        """
        Sync the matched node
        :param ship_packages: the packages and node info per bill of lading
        :param pod_node_id: ID of the node matched to the last-mile POD node
        """
        # If the packages under the bill of lading already have node push logs, they are not pushed again
        _logger.info(f"Syncing matched node, BLs: {ship_packages}, node: {pod_node_id}")
        if ship_packages:
            bl_objs = self.env['cc.bl'].sudo().search(
                [('id', 'in', [ship_package.get('bl_id') for ship_package in ship_packages])])
            redis_conn = self.env['common.common'].sudo().get_redis()
            if redis_conn and redis_conn != 'no' and pod_node_id:
                redis_conn.lpush('mail_push_package_list', json.dumps(
                    {'ids': bl_objs.ids, 'ship_packages': str(ship_packages), 'action_type': 'push_match_node',
                     'pod_node_id': pod_node_id}))
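Note that the queued payload embeds `ship_packages` as `str(...)` (a Python repr) inside the JSON, so a consumer has to undo that separately, e.g. with `ast.literal_eval`. A minimal parsing sketch with illustrative values, assuming the consumer pops the same list the `lpush` above feeds (the pop itself is not shown in this diff):

    import ast
    import json

    raw = redis_conn.rpop('mail_push_package_list')  # assumed consumer-side pop
    data = json.loads(raw)
    # data looks like: {'ids': [42], 'action_type': 'push_match_node', 'pod_node_id': 7,
    #   'ship_packages': "[{'bl_id': 42, 'id': [101, 102], 'tally_time': '2025-09-20 16:02:00'}]"}
    ship_packages = ast.literal_eval(data['ship_packages'])  # repr string -> list of dicts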
    def _extract_time_from_pdf(self, file_data, bl_no, ocr_texts=None):
        """
@@ -867,24 +896,24 @@ class BatchGetPodInfoWizard(models.TransientModel):
        """
        import re
        from datetime import datetime

        extracted_times = []
        try:
            # If no OCR text was supplied, run the OCR now
            if ocr_texts is None:
                pdf_binary = base64.b64decode(file_data)
                ocr_texts = self._extract_text_from_pdf_with_ocr(pdf_binary, bl_no)

            # Search the recognized text for times
            for page_num, text_content in ocr_texts.items():
                # Initialize this page's list of times
                page_times = []

                # Following the document layout, prefer the time after "DATE/TIME OF RELEASE"
                # Format examples: "21:47 20-OCT-2025", "08:35 17-OCT-2025", "13:52 17-OCT-2025"
                # OCR may drop the spaces, e.g. "DATETIMEOFRELEASE160220-SEP-2025"

                # First try the time right after "DATE/TIME OF RELEASE" (the red-box time)
                # Two cases are matched:
                # 1. With spaces: "DATE/TIME OF RELEASE 16:02 20-SEP-2025"
@@ -903,7 +932,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
                    # OCR may turn ":" into spaces, e.g. "DATETIMEOFRELEASE 163420-SEP-2025"
                    r'DATETIMEOFRELEASE\s+(\d)(\d)(\d{2})(\d{2})-([A-Z]{3})-(\d{4})',
                ]
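As a quick sanity check, the no-space pattern above splits the glued OCR digits into hour, minute, and date groups; a standalone snippet using the sample string from the comment:

    import re

    pattern = r'DATETIMEOFRELEASE\s+(\d)(\d)(\d{2})(\d{2})-([A-Z]{3})-(\d{4})'
    m = re.search(pattern, 'DATETIMEOFRELEASE 163420-SEP-2025', re.IGNORECASE)
    print(m.groups())  # ('1', '6', '34', '20', 'SEP', '2025') -> 16:34 on 20-SEP-2025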
                for idx, pattern in enumerate(release_time_patterns):
                    release_time_match = re.search(pattern, text_content, re.IGNORECASE)
                    if release_time_match:
@@ -913,18 +942,19 @@ class BatchGetPodInfoWizard(models.TransientModel):
                                # With spaces: ('16:02', '20-SEP-2025')
                                time_part = release_time_match.group(1)
                                date_part = release_time_match.group(2)

                                # Normalize the month abbreviation (strptime expects e.g. 'Sep')
                                month_map = {
                                    'JAN': 'Jan', 'FEB': 'Feb', 'MAR': 'Mar', 'APR': 'Apr',
                                    'MAY': 'May', 'JUN': 'Jun', 'JUL': 'Jul', 'AUG': 'Aug',
                                    'SEP': 'Sep', 'OCT': 'Oct', 'NOV': 'Nov', 'DEC': 'Dec'
                                }
                                # If the month in the date part is upper-case, convert it to title case
                                for key, value in month_map.items():
                                    date_part = re.sub(r'-{}\b'.format(key), f'-{value}', date_part,
                                                       flags=re.IGNORECASE)

                                match_str = f"{time_part} {date_part}"
                                time_obj = datetime.strptime(match_str, '%H:%M %d-%b-%Y')
                            elif group_count == 5:
@@ -934,7 +964,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
                                day = release_time_match.group(3)
                                month = release_time_match.group(4)
                                year = release_time_match.group(5)

                                # Normalize the month abbreviation
                                month_map = {
                                    'JAN': 'Jan', 'FEB': 'Feb', 'MAR': 'Mar', 'APR': 'Apr',
@@ -942,10 +972,11 @@ class BatchGetPodInfoWizard(models.TransientModel):
                                    'SEP': 'Sep', 'OCT': 'Oct', 'NOV': 'Nov', 'DEC': 'Dec'
                                }
                                month_normalized = month_map.get(month.upper(), month.capitalize())

                                # Build the datetime object by hand to avoid strptime format issues
                                month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
                                             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[
                                    month.upper()]
                                time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
                                match_str = f"{hour}:{minute} {day.zfill(2)}-{month_normalized}-{year}"
                            elif group_count == 6:
@@ -956,27 +987,29 @@ class BatchGetPodInfoWizard(models.TransientModel):
                                day = release_time_match.group(4)
                                month = release_time_match.group(5)
                                year = release_time_match.group(6)

                                # Combine the hour and minute digits
                                hour = hour_tens + hour_ones
                                minute = minute_str

                                # Build the datetime object by hand
                                month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
                                             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[
                                    month.upper()]
                                time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
                                match_str = f"{hour}:{minute} {day.zfill(2)}-{month}-{year}"
                            else:
                                _logger.warning(f"Unexpected group count: {group_count}, groups: {release_time_match.groups()}")
                                continue

                            page_times.append(time_obj)
                            break
                        except Exception as e:
                            _logger.warning(
                                f"Failed to parse the DATE/TIME OF RELEASE time: {release_time_match.groups()}, error: {str(e)}")
                    else:
                        continue

                # Then look for the other time formats
                time_patterns = [
                    # RELEASE NOTE format: HH:MM DD-MON-YYYY
@@ -986,7 +1019,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
                    r'\d{2}/\d{2}/\d{4}\s+\d{2}:\d{2}',  # 31/12/2023 12:30
                    r'\d{4}/\d{2}/\d{2}\s+\d{2}:\d{2}:\d{2}',  # 2023/12/31 12:30:00
                ]

                for pattern in time_patterns:
                    matches = re.findall(pattern, text_content)
                    for match in matches:
@@ -1015,22 +1048,22 @@ class BatchGetPodInfoWizard(models.TransientModel):
                                    continue
                            else:
                                continue
                        except Exception as e:
                            _logger.warning(f"Failed to parse time: {match}, error: {str(e)}")
                            continue

                # If this page yielded any times, record the earliest one
                if page_times:
                    earliest_page_time = min(page_times)
                    extracted_times.append(earliest_page_time)

            # If any times were extracted, return the earliest one
            if extracted_times:
                earliest_time = min(extracted_times)
                return [earliest_time]
        except Exception as e:
            _logger.error(f"Failed to extract time info from PDF, BL no.: {bl_no}, error: {str(e)}")

        return extracted_times
@@ -457,7 +457,7 @@ class CcBl(models.Model):
            raise ValidationError(
                _('The small package node or bill of lading node is not in the completed node, and the bill of lading cannot be changed to completed!'))

    def done_func(self, is_email=False, **kwargs):
        """
        Set to completed. Run the BL patrol first, then change the BL state
        """
@@ -475,7 +475,7 @@ class CcBl(models.Model):
            content = self.get_patrol_email_content(result)
            raise ValidationError(content)
        if is_success or kwargs.get('exception_reason'):
            super(CcBl, self).done_func(is_email=is_email, **kwargs)

    def check_bl_patrol(self):
        """
@@ -1226,7 +1226,7 @@ class CcBl(models.Model):
        return False

    def mail_auto_push(self, mail_time=False, tally_ship_packages=[], action_type='tally', mail_db_user='邮件接收',
                       pda_db_user='pda', pod_node_id=False):
        self = self.with_context(dict(self._context, is_mail=True))
        for item in self:
            try:
@@ -1249,127 +1249,137 @@ class CcBl(models.Model):
                user_obj = self.env['res.users'].search([('login', '=', pda_db_user)], limit=1)
                ship_package_ids = [ship_package_dict for sublist in [d['id'] for d in ship_packages] for
                                    ship_package_dict in sublist]
                node_obj = False
                all_ship_package_obj = self.env['cc.ship.package'].search(
                    [('id', 'in', ship_package_ids)])  # all small packages
                # Pre-fetch all sync logs in a single batched query
                all_sync_logs = self.env['cc.ship.package.sync.log'].sudo().search([
                    ('package_id', 'in', ship_package_ids)
                ])

                # Build a sync-log dict for fast lookups
                sync_log_dict = {}
                for log in all_sync_logs:
                    if log.package_id.id not in sync_log_dict:
                        sync_log_dict[log.package_id.id] = set()
                    sync_log_dict[log.package_id.id].add(log.process_code)

                # Build a ship_packages dict for fast lookups
                ship_packages_dict = {}
                for package in ship_packages:
                    # If an id appears in several packages, the last package's tally_time wins
                    if package.get('tally_time'):
                        for single_id in package['id']:
                            ship_packages_dict[single_id] = package['tally_time']

                if action_type != 'push_match_node':
                    tally_state = 'checked_goods' if action_type == 'tally' else (
                        'picked_up' if action_type == 'pickup' else 'handover_completed')
                    # Subsequent nodes
                    node_obj = self.env['cc.node'].sudo().search([
                        ('node_type', '=', 'package'),
                        ('tally_state', '=', tally_state)  # tally / last-mile handover nodes, ascending by seq
                    ], order='seq asc')
                    if node_obj:
                        # Preceding nodes not yet generated before tally / last-mile handover
                        before_node_obj = node_obj[0].get_before_node()
                        for before_node in before_node_obj:
                            before_minutes = before_node.calculate_total_interval(node_obj[0])
                            # Prepare the batch-update rows
                            update_data = []
                            for package in all_ship_package_obj:
                                package_id = package.id
                                if package_id not in sync_log_dict or before_node.tk_code not in sync_log_dict.get(
                                        package_id, set()):
                                    tally_time = ship_packages_dict.get(package_id)
                                    if tally_time:
                                        operation_time = (
                                            datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') - timedelta(
                                                minutes=before_minutes)) if tally_time else fields.Datetime.now() - timedelta(
                                            minutes=before_minutes)
                                        update_data.append((
                                            package_id,
                                            before_node.id,
                                            operation_time,
                                            before_node.desc,
                                            True if before_node.is_default else False
                                        ))

                            if update_data:
                                # Build the batch-update SQL
                                values_str = ','.join(
                                    self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in
                                    update_data)
                                sql = """
                                    UPDATE cc_ship_package AS t SET
                                        state = c.state,
                                        process_time = c.process_time,
                                        state_explain = c.state_explain,
                                        is_sync = c.is_sync
                                    FROM (VALUES
                                        {}
                                    ) AS c(id, state, process_time, state_explain, is_sync)
                                    WHERE t.id = c.id
                                """.format(values_str)

                                self.env.cr.execute(sql)
                                self._cr.commit()  # commit the transaction
                                item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
                                                        user_obj=user_obj)

                # Tally / last-mile handover nodes
                # Pre-fetch all state nodes
                if action_type == 'push_match_node' and pod_node_id:
                    # The matched last-mile node, including its preceding nodes
                    match_node_obj = self.env['cc.node'].sudo().search([('id', '=', pod_node_id)])
                    if match_node_obj:
                        node_obj = self.env['cc.node'].sudo().search(
                            [('node_type', '=', 'package'), ('seq', '<=', match_node_obj.seq), ('is_must', '=', True)],
                            order='seq asc')
                logging.info('node_obj: %s', node_obj)
                if not node_obj:
                    return False
                all_state_nodes = self.env['cc.node'].sudo().search([
                    ('node_type', '=', 'package')
                ])
                state_node_dict = {node.name: node for node in all_state_nodes}
                next_minutes = int(self.env['ir.config_parameter'].sudo().get_param('next_minutes', default=20))
                for index, node in enumerate(node_obj):
                    update_data = []
                    for package in all_ship_package_obj:
                        if package.state.name in state_node_dict:
                            current_state_node = state_node_dict[package.state.name]
                            if current_state_node.seq < node.seq:
                                tally_time = ship_packages_dict.get(package.id)
                                if tally_time:
                                    operation_time = (
                                        datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S') + timedelta(
                                            minutes=next_minutes * index)) if tally_time else fields.Datetime.now() + timedelta(
                                        minutes=next_minutes * index)
                                    update_data.append((
                                        package.id,
                                        node.id,
                                        operation_time,
                                        node.desc,
                                        True if node.is_default else False
                                    ))
                    if update_data:
                        # Build the batch-update SQL
                        values_str = ','.join(
                            self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in update_data)
                        sql = """
                            UPDATE cc_ship_package AS t SET
                                state = c.state,
                                process_time = c.process_time,
                                state_explain = c.state_explain,
                                is_sync = c.is_sync
                            FROM (VALUES
                                {}
                            ) AS c(id, state, process_time, state_explain, is_sync)
                            WHERE t.id = c.id
                        """.format(values_str)
                        self.env.cr.execute(sql)
                        self._cr.commit()  # commit the transaction
                        item.try_callback_track(max_retries=2, ship_package_ids=ship_package_ids,
                                                user_obj=user_obj)
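For reference, the loop above staggers the generated node times `next_minutes` apart, starting from the extracted tally time. A self-contained sketch with illustrative values (the node names are hypothetical):

    from datetime import datetime, timedelta

    next_minutes = 20
    tally_time = '2025-09-20 16:02:00'  # earliest time extracted from the POD file
    nodes = ['arrived_hub', 'out_for_delivery', 'delivered']  # hypothetical node sequence

    base = datetime.strptime(tally_time, '%Y-%m-%d %H:%M:%S')
    for index, node in enumerate(nodes):
        print(node, base + timedelta(minutes=next_minutes * index))
    # arrived_hub 2025-09-20 16:02:00
    # out_for_delivery 2025-09-20 16:22:00
    # delivered 2025-09-20 16:42:00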
            except Exception as err:
                logging.error('fetch_mail_dlv--error:%s' % str(err))
@@ -42,10 +42,11 @@ class Order_dispose(object):
                    bl_record = bl_obj.browse(bl_ids)
                else:
                    bl_record = bl_obj.browse(data['id'])
                utc_time = data.get('utc_time')
                user_login = data.get('user_login')
                pod_node_id = data.get('pod_node_id')
                bl_record.mail_auto_push(utc_time, ship_packages, action_type, user_login, config.pda_db_user,
                                         pod_node_id=pod_node_id)
        except Exception as ex:
            logging.error('mail_auto_push error:%s' % str(ex))
        return res_data
@@ -54,7 +55,7 @@ class Order_dispose(object):
try:
    pool = redis.ConnectionPool(**config.redis_options)
    r = redis.Redis(connection_pool=pool)
    logging.info(u'redis connection success')
    Order_dispose = Order_dispose()
    while 1:
        try:
@@ -65,4 +66,4 @@ try:
            logging.error(e)
            continue
except Exception as e:
    logging.error("login failed:%s" % e)