Commit c7aafbd0 authored by 贺阳

If the small packages under this bill of lading already have a push log for the corresponding node, that node is not pushed again; the preceding nodes must also be pushed.

Parent 4b2b1e33
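A minimal sketch of that push rule, assuming plain Python data instead of the actual Odoo records (the names plan_node_push, candidate_node_ids and existing_push_logs are illustrative, not identifiers from the codebase):

# Hedged sketch: skip (package, node) pairs that already have a push log,
# but keep any preceding nodes that are still missing.
def plan_node_push(package_ids, candidate_node_ids, existing_push_logs):
    """candidate_node_ids: the matched node plus its mandatory preceding
    nodes, ordered by seq; existing_push_logs: set of (package_id, node_id)
    pairs that already have a push log."""
    plan = {}
    for package_id in package_ids:
        missing = [node_id for node_id in candidate_node_ids
                   if (package_id, node_id) not in existing_push_logs]
        if missing:
            plan[package_id] = missing
    return plan

# Example: node 2 was already pushed for package 5001, so only 1 and 3 remain.
assert plan_node_push([5001], [1, 2, 3], {(5001, 2)}) == {5001: [1, 3]}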
@@ -3,6 +3,7 @@
import base64
import io
import json
import logging
import time
@@ -55,7 +56,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
"""
Confirm operation # 确认操作
"""
#计算整个过程的耗时
# 计算整个过程的耗时
start_time = time.time()
_logger.info(f"开始执行批量获取POD信息操作")
bl_objs = self.get_order()
@@ -91,7 +92,7 @@ class BatchGetPodInfoWizard(models.TransientModel):
# 同步推送匹配节点
if self.sync_match_node and processed_files:
self._sync_match_node(processed_files)
self.get_date_sync_match_node(processed_files)
end_time = time.time()
_logger.info(f"批量获取POD信息操作完成,耗时: {end_time - start_time}秒")
@@ -807,11 +808,22 @@ class BatchGetPodInfoWizard(models.TransientModel):
_logger.error(f"保存PDF附件失败: {str(e)}")
raise ValidationError(_('Failed to save PDF attachment: %s') % str(e))
def _sync_match_node(self, processed_files):
def get_date_sync_match_node(self, processed_files):
"""
Sync matched node based on POD file, extract time from red boxes # 根据POD文件同步匹配节点
:param processed_files: 处理后的文件数组
"""
ship_packages, pod_node_id = self.get_detail_info(processed_files)
self._sync_match_node(ship_packages, pod_node_id)
def get_detail_info(self, processed_files):
"""
获取提单对应的节点以及时间
:param processed_files: 处理后的文件数组
:return: 提单对应的节点以及节点操作时间
"""
ship_packages = []
error_bl = []
# 查找对应的清关节点(勾选了POD节点匹配的节点)
pod_node = self.env['cc.node'].search([
('is_pod_node', '=', True),
@@ -824,37 +836,54 @@ class BatchGetPodInfoWizard(models.TransientModel):
if not pod_node:
_logger.info(f"未找到尾程POD节点匹配的节点,提单号: {bl.bl_no}")
continue
# 从PDF文件提取红色框的时间
file_data = file_info.get('file_data')
if not file_data:
_logger.warning(f"提单 {bl.bl_no} 没有文件数据")
continue
try:
# 如果已识别过OCR文本,则复用
ocr_texts = file_info.get('ocr_texts')
# 解析PDF提取时间
extracted_times = self._extract_time_from_pdf(file_data, bl.bl_no, ocr_texts=ocr_texts)
if extracted_times:
# 取最早的时间作为节点操作时间
earliest_time = min(extracted_times)
_logger.info(f"提取到最早时间: {earliest_time},将作为节点操作时间")
# 这里需要实现具体的节点操作逻辑
# 根据实际业务需求,可能需要更新某个字段或调用某个方法
# 例如:更新节点的操作时间或状态
# pod_node.operation_time = earliest_time
# 或者调用某个方法来记录节点操作
_logger.info(f"为提单 {bl.bl_no} 同步节点操作时间: {earliest_time}")
ship_packages.append({
'bl_id': bl.id,
'id': bl.ship_package_ids.ids,
'tally_time': str(earliest_time)
})
else:
_logger.info(f"未从POD文件中提取到时间信息,提单号: {bl.bl_no}")
_logger.warning(f"提单 {bl.bl_no} 没有提取到时间信息")
error_bl.append(bl)
except Exception as e:
_logger.error(f"同步匹配节点失败,提单号: {bl.bl_no}, 错误: {str(e)}")
_logger.error(f"获取提单对应的节点以及时间失败,提单号: {bl.bl_no}, 错误: {str(e)}")
error_bl.append(bl)
if error_bl:
_logger.warning(f"提单 {', '.join([bl.bl_no for bl in error_bl])} 没有提取到时间信息")
raise ValidationError(
_('%s bill of lading cannot get node operation time, please manually upload and push the bill of lading to TK') % (
', '.join([bl.bl_no for bl in error_bl]))) # xx提单号没有获取到节点操作时间,请手动上传推送提单到TK
return ship_packages, pod_node.id
def _sync_match_node(self, ship_packages, pod_node_id):
"""
同步匹配节点
:param ship_packages: 提单对应的小包以及节点信息
:param pod_node_id: 尾程POD节点匹配的节点ID
"""
# 若该提单里已有对应的小包已有节点推送日志,则不再重新推送;
_logger.info(f"同步匹配节点,提单: {ship_packages}, 节点: {pod_node_id}")
if ship_packages:
bl_objs = self.env['cc.bl'].sudo().search(
[('id', 'in', [ship_package.get('bl_id') for ship_package in ship_packages])])
redis_conn = self.env['common.common'].sudo().get_redis()
if redis_conn and redis_conn != 'no' and pod_node_id:
redis_conn.lpush('mail_push_package_list', json.dumps(
{'ids': bl_objs.ids, 'ship_packages': str(ship_packages), 'action_type': 'push_match_node',
'pod_node_id': pod_node_id}))
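# A hedged sketch of the queue payload produced by the lpush above and one way
# a consumer could decode it. 'ship_packages' is serialized with str(), so it
# is not valid JSON on its own; ast.literal_eval below is an assumption about
# how it would be turned back into Python objects, not a transcript of the
# actual consumer code.
import ast
import json

payload = json.dumps({
    'ids': [101],                      # cc.bl record ids
    'ship_packages': str([{'bl_id': 101, 'id': [5001, 5002],
                           'tally_time': '2024-01-03 14:05:00'}]),
    'action_type': 'push_match_node',
    'pod_node_id': 42,
})
data = json.loads(payload)
ship_packages = ast.literal_eval(data['ship_packages'])  # back to a list of dicts
assert ship_packages[0]['id'] == [5001, 5002]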
def _extract_time_from_pdf(self, file_data, bl_no, ocr_texts=None):
"""
@@ -923,7 +952,8 @@ class BatchGetPodInfoWizard(models.TransientModel):
# 如果日期部分中的月份是大写,转换为首字母大写
for key, value in month_map.items():
date_part = re.sub(r'-{}\b'.format(key), f'-{value}', date_part, flags=re.IGNORECASE)
date_part = re.sub(r'-{}\b'.format(key), f'-{value}', date_part,
flags=re.IGNORECASE)
match_str = f"{time_part} {date_part}"
time_obj = datetime.strptime(match_str, '%H:%M %d-%b-%Y')
@@ -944,8 +974,9 @@ class BatchGetPodInfoWizard(models.TransientModel):
month_normalized = month_map.get(month.upper(), month.capitalize())
# 直接手动创建datetime对象,避免strptime的格式问题
month_num = {'JAN':1,'FEB':2,'MAR':3,'APR':4,'MAY':5,'JUN':6,
'JUL':7,'AUG':8,'SEP':9,'OCT':10,'NOV':11,'DEC':12}[month.upper()]
month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[
month.upper()]
time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
match_str = f"{hour}:{minute} {day.zfill(2)}-{month_normalized}-{year}"
elif group_count == 6:
@@ -962,8 +993,9 @@ class BatchGetPodInfoWizard(models.TransientModel):
minute = minute_str
# 直接手动创建datetime对象
month_num = {'JAN':1,'FEB':2,'MAR':3,'APR':4,'MAY':5,'JUN':6,
'JUL':7,'AUG':8,'SEP':9,'OCT':10,'NOV':11,'DEC':12}[month.upper()]
month_num = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}[
month.upper()]
time_obj = datetime(int(year), month_num, int(day), int(hour), int(minute))
match_str = f"{hour}:{minute} {day.zfill(2)}-{month}-{year}"
else:
@@ -973,7 +1005,8 @@ class BatchGetPodInfoWizard(models.TransientModel):
page_times.append(time_obj)
break
except Exception as e:
_logger.warning(f"解析DATE/TIME OF RELEASE时间失败: {release_time_match.groups()}, 错误: {str(e)}")
_logger.warning(
f"解析DATE/TIME OF RELEASE时间失败: {release_time_match.groups()}, 错误: {str(e)}")
else:
continue
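# A hedged, self-contained sketch of the month-mapping approach used in the
# hunks above: building the datetime by hand avoids strptime's locale-dependent
# %b handling for upper-case abbreviations such as "03-JAN-2025". The regex is
# illustrative and simpler than the real DATE/TIME OF RELEASE pattern.
import re
from datetime import datetime

MONTH_NUM = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}

def parse_release_time(text):
    """Return a datetime for the first 'HH:MM DD-MMM-YYYY' match, else None."""
    m = re.search(r'(\d{1,2}):(\d{2})\s+(\d{1,2})-([A-Za-z]{3})-(\d{4})', text)
    if not m:
        return None
    hour, minute, day, month, year = m.groups()
    return datetime(int(year), MONTH_NUM[month.upper()], int(day),
                    int(hour), int(minute))

assert parse_release_time('DATE/TIME OF RELEASE 14:05 03-JAN-2025') == datetime(2025, 1, 3, 14, 5)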
@@ -457,7 +457,7 @@ class CcBl(models.Model):
raise ValidationError(
_('The small package node or bill of lading node is not in the completed node, and the bill of lading cannot be changed to completed!')) # 小包节点或提单节点不在已完成节点,提单不能变为已完成!
def done_func(self, is_email=False,**kwargs):
def done_func(self, is_email=False, **kwargs):
"""
变为已完成.先进行提单巡查,再进行提单状态变更
"""
@@ -475,7 +475,7 @@ class CcBl(models.Model):
content = self.get_patrol_email_content(result)
raise ValidationError(content)
if is_success or kwargs.get('exception_reason'):
super(CcBl, self).done_func(is_email=is_email,**kwargs)
super(CcBl, self).done_func(is_email=is_email, **kwargs)
def check_bl_patrol(self):
"""
@@ -1226,7 +1226,7 @@ class CcBl(models.Model):
return False
def mail_auto_push(self, mail_time=False, tally_ship_packages=[], action_type='tally', mail_db_user='邮件接收',
pda_db_user='pda'):
pda_db_user='pda', pod_node_id=False):
self = self.with_context(dict(self._context, is_mail=True))
for item in self:
try:
@@ -1249,14 +1249,7 @@ class CcBl(models.Model):
user_obj = self.env['res.users'].search([('login', '=', pda_db_user)], limit=1)
ship_package_ids = [ship_package_dict for sublist in [d['id'] for d in ship_packages] for
ship_package_dict in sublist]
tally_state = 'checked_goods' if action_type == 'tally' else (
'picked_up' if action_type == 'pickup' else 'handover_completed')
# 后续节点
node_obj = self.env['cc.node'].sudo().search([
('node_type', '=', 'package'),
('tally_state', '=', tally_state) # 检查理货或尾程交接的节点,根据排序进行升序
], order='seq asc')
if node_obj:
node_obj = False
all_ship_package_obj = self.env['cc.ship.package'].search(
[('id', 'in', ship_package_ids)]) # 所有小包
# 预先获取所有同步日志 - 批量查询
@@ -1278,7 +1271,15 @@ class CcBl(models.Model):
if package.get('tally_time'):
for single_id in package['id']:
ship_packages_dict[single_id] = package['tally_time']
if action_type != 'push_match_node':
tally_state = 'checked_goods' if action_type == 'tally' else (
'picked_up' if action_type == 'pickup' else 'handover_completed')
# 后续节点
node_obj = self.env['cc.node'].sudo().search([
('node_type', '=', 'package'),
('tally_state', '=', tally_state) # 检查理货或尾程交接的节点,根据排序进行升序
], order='seq asc')
if node_obj:
# 前序节点 理货或尾程交接之前没有生成的节点
before_node_obj = node_obj[0].get_before_node()
# 理货或尾程交接之前没有生成的节点
@@ -1307,7 +1308,8 @@ class CcBl(models.Model):
if update_data:
# 构建批量更新SQL
values_str = ','.join(
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in update_data)
self.env.cr.mogrify("(%s,%s,%s,%s,%s)", row).decode('utf-8') for row in
update_data)
sql = """
UPDATE cc_ship_package AS t SET
state = c.state,
@@ -1326,6 +1328,14 @@ class CcBl(models.Model):
user_obj=user_obj)
# 理货或尾程交接的节点
# 预先获取所有状态节点
if action_type == 'push_match_node' and pod_node_id:
# 尾程匹配的节点包括前序节点
match_node_obj = self.env['cc.node'].sudo().search([('id', '=', pod_node_id)])
if match_node_obj:
node_obj = self.env['cc.node'].sudo().search([('node_type', '=', 'package'), ('seq', '<=', match_node_obj.seq), ('is_must', '=', True)], order='seq asc')
logging.info('node_obj: %s', node_obj)
if not node_obj:
return False
all_state_nodes = self.env['cc.node'].sudo().search([
('node_type', '=', 'package')
])
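# A hedged sketch of the node selection above, on plain dicts instead of
# cc.node records: the matched POD node plus every mandatory ('is_must') node
# with a smaller or equal seq is kept, so preceding nodes are not skipped.
def nodes_for_push_match(package_nodes, pod_node_id):
    matched = next(n for n in package_nodes if n['id'] == pod_node_id)
    return sorted((n for n in package_nodes
                   if n['seq'] <= matched['seq'] and n['is_must']),
                  key=lambda n: n['seq'])

nodes = [{'id': 1, 'seq': 10, 'is_must': True},
         {'id': 2, 'seq': 20, 'is_must': False},
         {'id': 3, 'seq': 30, 'is_must': True},
         {'id': 4, 'seq': 40, 'is_must': True}]
assert [n['id'] for n in nodes_for_push_match(nodes, 3)] == [1, 3]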
@@ -42,10 +42,11 @@ class Order_dispose(object):
bl_record = bl_obj.browse(bl_ids)
else:
bl_record = bl_obj.browse(data['id'])
# utc_time = datetime.strptime(data['utc_time'], "%Y-%m-%d %H:%M:%S")
utc_time = data.get('utc_time')
user_login = data.get('user_login')
bl_record.mail_auto_push(utc_time, ship_packages, action_type, user_login, config.pda_db_user)
pod_node_id = data.get('pod_node_id')
bl_record.mail_auto_push(utc_time, ship_packages, action_type, user_login, config.pda_db_user,
pod_node_id=pod_node_id)
except Exception as ex:
logging.error('mail_auto_push error:%s' % str(ex))
return res_data
@@ -54,7 +55,7 @@ class Order_dispose(object):
try:
pool = redis.ConnectionPool(**config.redis_options)
r = redis.Redis(connection_pool=pool)
logging.info(u'redis连接成功')
logging.info(u'redis connection success')
Order_dispose = Order_dispose()
while 1:
try:
@@ -65,4 +66,4 @@ try:
logging.error(e)
continue
except Exception as e:
logging.error("登录失败:%s" % e)
logging.error("login failed:%s" % e)