Lan 3 nedēļas atpakaļ
vecāks
revīzija
ca4906e10f

BIN
backend/db.sqlite3


BIN
scheduler/__pycache__/processManager.cpython-310.pyc


BIN
scheduler/__pycache__/utils.cpython-310.pyc


+ 64 - 42
scheduler/algo1Folder/controller.py

@@ -1,8 +1,8 @@
 import requests
 import os
+import json
 import logging
-from time import sleep
-
+import time
 
 ''' 准备数据 '''
 SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
@@ -21,55 +21,77 @@ params = {
     "planId": planId,
 }
 
+print(json.dumps({'msg': 'started'}), flush=True)
 
-print("THIS is a algo program")
 response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
 data = response.json()
-print("data is")
-print(data)
 if not data:
     quit()
-else:
-    print(data['nodes'])
-    print(data['edges'])
+print(json.dumps({'msg': 'start', 'data': data}), flush=True)
 
 ''' 开始计算 '''
+progressData = {
+    'missionId': missionId,
+    'planId': planId,
+    'progress': 0,
+}
+print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+start_time = time.perf_counter()
+count = 0
+while True:
+    count += 1
+    if time.perf_counter() - start_time >= 2.0:
+        break   
+progressData['progress'] = 20
+print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+start_time = time.perf_counter()
+count = 0
+while True:
+    count += 1
+    if time.perf_counter() - start_time >= 2.0:
+        break   
+
+progressData['progress'] = 40
+print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+start_time = time.perf_counter()
+count = 0
+while True:
+    count += 1
+    if time.perf_counter() - start_time >= 2.0:
+        break   
+progressData['progress'] = 60
+print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+start_time = time.perf_counter()
+count = 0
+while True:
+    count += 1
+    if time.perf_counter() - start_time >= 2.0:
+        break   
 
-for i in range(5):
-    response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={
-        'missionId': missionId,
-        'planId': planId,
-        'progress': i * 20,
-    })
-    sleep(3)
 
 ''' 完成计算 '''
-try:
-    response = requests.post(SCHEDULER_BASE_URL + '/report', json={
-        'missionId': missionId,
-        'planId': planId,
-        'state': 'DONE',
-        'results': {
-            'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
-            'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
-        },
-    })
-except Exception as error:
-    print("ERROR is", error)
+result = {
+    'missionId': missionId,
+    'planId': planId,
+    'progress': 100,
+    'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
+    'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
+}
+print(json.dumps({'msg': 'result', 'data': result}), flush=True)
 
-if response:
-    if response.json()['code'] == 'OK':
-        print("response is ok")
+# if response:
+#     if response.json()['code'] == 'OK':
+#         print("response is ok")
 
-        response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={
-            'missionId': missionId,
-            'planId': planId,
-            'progress': 100,
-            'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
-            'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
-        })
-        print(f"算法控制程序推送结果完毕 MissionId: {missionId} PlanId: {planId} Message: {response.json()}")
-    else:
-        print(f"算法控制程序结果反馈未被识别 MissionId: {missionId} PlanId: {planId}")
-else:
-    print(f"算法控制程序结果反馈失败 MissionId: {missionId} PlanId: {planId}")
+#         response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={
+#             'missionId': missionId,
+#             'planId': planId,
+#             'progress': 100,
+#             'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
+#             'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
+#         })
+#         print(f"算法控制程序推送结果完毕 MissionId: {missionId} PlanId: {planId} Message: {response.json()}")
+#     else:
+#         print(f"算法控制程序结果反馈未被识别 MissionId: {missionId} PlanId: {planId}")
+# else:
+#     print(f"算法控制程序结果反馈失败 MissionId: {missionId} PlanId: {planId}")

+ 4 - 4
scheduler/algorithms.config

@@ -1,4 +1,4 @@
-algA,algo1Folder,python controller.py
-algB,algo1Folder,python controller.py
-algC,algo1Folder,python controller.py
-algD,algo1Folder,python controller.py
+algA,algo1Folder,python -u controller.py
+algB,algo1Folder,python -u controller.py
+algC,algo1Folder,python -u controller.py
+algD,algo1Folder,python -u controller.py

+ 247 - 57
scheduler/processManager.py

@@ -5,6 +5,8 @@ import logging
 import threading
 import time
 import os
+import json
+from queue import Queue, Empty
 from typing import Dict, Optional, List, Union
 import requests
 
@@ -12,7 +14,7 @@ from utils import SCHEDULER_BASE_URL, BACKEND_BASE_URL
 
 
 class ProcessManager:
-    def __init__(self, check_interval: int = 5, timeout: int = 300):
+    def __init__(self, check_interval: int = 1, timeout: int = 300):
         self.processes: Dict[int, dict] = {}  # {pid: {meta}}
         self.lock = threading.Lock()
         self.check_interval = check_interval  # 检查间隔(秒)
@@ -20,6 +22,9 @@ class ProcessManager:
         self._monitor_thread: Optional[threading.Thread] = None
         self._running = False
 
+        # 与子进程的通信队列相关参数
+        self._reader_threads: Dict[int, threading.Thread] = {} # pid:
+
         # 配置日志
         self.log_dir: str = "process_logs"
         # 确保日志目录存在
@@ -30,6 +35,17 @@ class ProcessManager:
         )
         self.logger = logging.getLogger("ProcessManager")
 
+        # 准备日志文件
+        timestamp = time.strftime("%Y%m%d-%H%M%S")
+        log_prefix = os.path.join(
+            self.log_dir,
+            f"proc_{timestamp}_{os.getpid()}"
+        )
+        
+        # 打开日志文件
+        # with open(f"{log_prefix}.stdout", "w") as stdout_f, \
+        #         open(f"{log_prefix}.stderr", "w") as stderr_f:
+
     def spawn(
         self,
         command: Union[str, List[str]],
@@ -48,58 +64,112 @@ class ProcessManager:
         :return: 成功返回PID,失败返回None
         """
         try:
-            # 准备日志文件
-            timestamp = time.strftime("%Y%m%d-%H%M%S")
-            log_prefix = os.path.join(
-                self.log_dir,
-                f"proc_{timestamp}_{os.getpid()}"
+            # 创建子进程
+            env = {**os.environ, **(env or {})}
+            env.update({
+                "PYTHONUNBUFFERED": "1",  # 禁用缓冲
+                'SCHEDULER_BASE_URL': SCHEDULER_BASE_URL,
+                'BACKEND_BASE_URL': BACKEND_BASE_URL,
+                'missionId': str(meta['mission']['id']),
+                'planId': str(meta['plan']['id']),
+            })
+            proc = subprocess.Popen(
+                command,
+                cwd=cwd,
+                env=env,
+                shell=shell,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
             )
-            
-            # 打开日志文件
-            with open(f"{log_prefix}.stdout", "w") as stdout_f, \
-                 open(f"{log_prefix}.stderr", "w") as stderr_f:
-
-                # 创建子进程
-                env = {**os.environ, **(env or {})}
-                env.update({
-                    'SCHEDULER_BASE_URL': SCHEDULER_BASE_URL,
-                    'BACKEND_BASE_URL': BACKEND_BASE_URL,
-                    'missionId': str(meta['mission']['id']),
-                    'planId': str(meta['plan']['id']),
-                })
-                proc = subprocess.Popen(
-                    command,
-                    cwd=cwd,
-                    env=env,
-                    shell=shell,
-                    stdout=stdout_f,
-                    stderr=stderr_f,
-                    text=True,
-                )
-
-                # 注册进程信息
-                with self.lock:
-                    self.processes[proc.pid] = {
-                        "proc": proc,
-                        "command": command,
-                        "start_time": time.time(),
-                        "last_active": time.time(),
-                        "log_stdout": stdout_f.name,
-                        "log_stderr": stderr_f.name,
-                        "meta": meta
-                    }
-
-                self.logger.info(
-                    f"创建子进程 PID={proc.pid} | "
-                    f"命令: {command} | "
-                    f"日志: {log_prefix}.*"
-                )
-                return proc.pid
+            self.logger.info(f"准备创建进程")
+            # 注册进程信息
+            with self.lock:
+                self.processes[proc.pid] = {
+                    "proc": proc,
+                    "command": command,
+                    "start_time": time.time(),
+                    "last_active": time.time(),
+                    "msg_queue": Queue(),
+                    "meta": meta
+                }
+                self.logger.info(f"准备开始监听")
+                self._start_reader(proc.pid, proc.stdout, proc.stderr)
+
+            self.logger.info(
+                f"创建子进程 PID={proc.pid} | "
+                f"命令: {command} | "
+            )
+            return proc.pid
 
         except Exception as e:
             self.logger.error(f"创建进程失败: {str(e)}")
             return None
 
+    def stop(self, missionId: int):
+        plansStopped = []
+        pids = [] # to delete
+        for pid in self.processes:
+            if int(self.processes[pid]['meta']['mission']['id']) == missionId:
+                pids.append(pid)
+                plansStopped.append({'planId': int(self.processes[pid]['meta']['plan']['id'])})
+        for pid in pids:
+            self.remove_process(pid)
+        return plansStopped
+
+    def _start_reader(self, pid:int, stdout, stderr):
+        """为每个子进程启动独立的非阻塞读取线程"""
+        def reader_loop(pid, out_pipe, queue: Queue):
+            try:
+                while True:
+                    # 非阻塞读取 stdout
+                    try:
+                        out_line = out_pipe.readline()
+                        # 管道关闭返回空串
+                        if not out_line:
+                            break
+                        if out_line:
+                            queue.put(('stdout', out_line.strip()))
+                    except (IOError, ValueError):
+                        self.logger.info(f"进程消息读取错误 pid:{pid}")
+                        pass
+                    time.sleep(0.1)  # 降低 CPU 占用
+            except Exception as e:
+                self.logger.error(f"读取子进程消息错误: {str(e)}")
+        def reader_err_loop(pid, err_pipe, queue: Queue):
+            try:
+                while True:
+                    # 非阻塞读取 stderr
+                    try:
+                        err_line = err_pipe.readline()
+                        # 管道关闭返回空串
+                        if not err_line:
+                            break
+                        if err_line:
+                            queue.put(('stderr', err_line.strip()))
+                    except (IOError, ValueError):
+                        self.logger.info(f"进程错误读取错误 pid:{pid}")
+                        pass
+                    time.sleep(0.1)  # 降低 CPU 占用
+            except Exception as e:
+                self.logger.error(f"读取子进程消息错误: {str(e)}")
+
+        # 创建并启动消息读取线程
+        t = threading.Thread(
+            target=reader_loop,
+            args=(pid, stdout, self.processes[pid]["msg_queue"]),
+            daemon=True
+        )
+        t.start()
+        # 创建并启动错误读取线程
+        tE = threading.Thread(
+            target=reader_err_loop,
+            args=(pid, stderr, self.processes[pid]["msg_queue"]),
+            daemon=True
+        )
+        tE.start()
+        self._reader_threads[pid] = [t, tE]
+
     def start_monitoring(self):
         """启动后台监控线程"""
         if self._running:
@@ -128,14 +198,47 @@ class ProcessManager:
                     proc = psutil.Process(pid)
                     proc.terminate()
                     del self.processes[pid]
+                    # 清理读取线程
+                    if pid in self._reader_threads:
+                        for t in self._reader_threads[pid]:
+                            t.join(timeout=1.0)  # reader threads block in readline; bounded wait avoids hanging the monitor
+                        del self._reader_threads[pid]
                     self.logger.info(f"移除处理进程监视 MissionId: {missionId} PlanId: {planId}")
                     return True
+                except psutil.NoSuchProcess:
+                    self.logger.error(f"进程已自行退出 MissionId: {missionId} PlanId: {planId}")
+                    return True
                 except Exception as error:
                     self.logger.error(f"移除处理进程监视失败 MissionId: {missionId} PlanId: {planId} Error: {error}")
                     return False
         self.logger.info(f"该处理进程不在监视中 MissionId: {missionId} PlanId: {planId}")
         return True
-                
+    
+
+    def remove_process(self, pid_to_del: int):
+        self.logger.error(f"移除处理进程-with pid")
+        try:
+            # 清理读取线程
+            if pid_to_del in self._reader_threads:
+                for t in self._reader_threads[pid_to_del]:
+                    t.join(timeout=1.0)  # process is terminated only AFTER this join; unbounded join deadlocks on a live pipe
+                del self._reader_threads[pid_to_del]
+            for pid, info in self.processes.items():
+                if pid == pid_to_del:
+                    missionId = info['meta']['mission']['id']
+                    planId = info["meta"]["plan"]["id"]
+                    proc = psutil.Process(pid)
+                    proc.terminate()
+                    del self.processes[pid]
+                    self.logger.info(f"移除处理进程监视成功 MissionId: {missionId} PlanId: {planId}")
+                    return
+            self.logger.error(f"未找到请求移除的进程 pid:{pid_to_del}")
+        except psutil.NoSuchProcess:
+            self.logger.error(f"进程已自行退出 MissionId: {missionId} PlanId: {planId}")
+        except Exception as error:
+            self.logger.error(f"移除处理进程监视-with pid失败 pid:{pid_to_del}")
+
+
 
     def _monitor_loop(self):
         """监控主循环"""
@@ -151,12 +254,11 @@ class ProcessManager:
         """执行进程状态检查"""
         current_time = time.time()
         dead_pids = []
-
+        reportDatas = []
         with self.lock:
             for pid, info in self.processes.items():
                 try:
                     proc = psutil.Process(pid)
-
                     # 检测崩溃
                     if proc.status() == psutil.STATUS_ZOMBIE:
                         self.logger.warning(f"进程 {pid} 处于僵尸状态")
@@ -176,18 +278,104 @@ class ProcessManager:
                     info["last_active"] = current_time
 
                 except psutil.NoSuchProcess:
-                    response = requests.post(SCHEDULER_BASE_URL + '/report', json={'missionId': info['meta']['mission']['id'], 'planId': info['meta']['plan']['id'], 'state': 'CRASH', 'results': None})
-                    code = response.json()['code']
-                    if code == 'OK':
-                        self.logger.warning(f"进程 {pid} 已正常退出")
-                        dead_pids.append(pid)
-                    if code == 'ERROR':
-                        dead_pids.append(pid)
+                    # 无法找到进程时,可能子进程已正常退出,读取所有管道中剩余数据,判断是否崩溃
+                    response = {}
+                    try:
+                        while True:
+                            self.logger.info(f"111")
+                            pipe_type, message = info["msg_queue"].get_nowait()
+                            self.logger.info(f"222")
+                            response = self._handle_process_message(pid, pipe_type, message)
+                            self.logger.info(f"返回信息1{response}")
+                    except Empty:
+                        self.logger.info(f"333")
+                        pass
+                    except Exception as error:
+                        self.logger.error(f"ERROR:{error}")
+                    self.logger.info(f"返回信息2{response}")
+                    if 'finished' in response and response['finished']:
+                        # 虽然已经找不到进程,但是由于进程已正常退出
+                        # 将获取的result缓存,待释放锁后向调度器汇报
+                        reportDatas.append({
+                            'missionId': info["meta"]["mission"]["id"],
+                            'planId': info["meta"]["plan"]["id"],
+                            'state': 'DONE',
+                            'results': response['results'],
+                        })
+                        
+                        # process already gone: do not call remove_process while iterating self.processes; dead_pids sweep below cleans up
+                    else:
+                        # 非正常退出(未传递finished信息)
                         self._handle_crash(pid, info)
 
+                    # 无论如何都加入待清理列表
+                    dead_pids.append(pid)
+                
+                # 正常读取子进程输出
+                try:
+                    while True:
+                        pipe_type, message = info["msg_queue"].get_nowait()
+                        response = self._handle_process_message(pid, pipe_type, message)
+                        if 'finished' in response and response['finished']:
+                            # 正常退出
+                            # 将获取的result缓存,待释放锁后向调度器汇报
+                            reportDatas.append({
+                                'missionId': info["meta"]["mission"]["id"],
+                                'planId': info["meta"]["plan"]["id"],
+                                'state': 'DONE',
+                                'results': response['results'],
+                            })
+                            # defer cleanup to the dead_pids sweep (remove_process here mutates self.processes mid-iteration)
+                            dead_pids.append(pid)
+                except Empty:
+                    pass
+                
+
             # 清理已终止进程
             for pid in dead_pids:
+                self.processes.pop(pid, None)  # pid may appear twice in dead_pids; avoid KeyError
+                # 清理读取线程
+                if pid in self._reader_threads:
+                    for t in self._reader_threads[pid]:
+                        t.join(timeout=1.0)  # daemon reader threads; bounded wait is safe
+                    del self._reader_threads[pid]    
+        # 锁已释放
+        # 依次汇报已结束任务,获取下一步任务
+        for report in reportDatas:
+            response = requests.post(SCHEDULER_BASE_URL + "/report", json=report)
+            self.logger.info(f"进程结果已提交调度器 mission:{report['missionId']} plan:{report['planId']} response:{response}")
+
+    def _handle_process_message(self, pid:int, pipe_type:str, message:str):
+        """处理来自子进程的通信消息"""
+        try:
+            # 解析 JSON 格式消息
+            data = json.loads(message)
+            self.logger.info(f"收到进程消息 PID={pid}: {data}")
+
+            # 更新最后活跃时间
+            self.processes[pid]["last_active"] = time.time()
+
+            # 处理完成消息
+            msg = data.get("msg")
+            if msg == "progress":
+                # 获得进度汇报,向django汇报
+                response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json=data.get("data"))
+                return {'finished': False}
+            
+            if msg == "result":
+                # 获得返回结果,向django汇报
+                response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json=data.get("data"))
+                self.logger.info(f"进程结果已向后端服务器反馈 pid:{pid} response:{response}")
+
+                self.logger.info(f"进程 {pid} 报告已完成")
+                # 标记该进程正常退出
+                return {'finished': True, 'results': data.get('data')}
+            return {'finished': False}
+
+        except json.JSONDecodeError:
+            self.logger.warning(f"无效消息格式 PID={pid}: {message}")
+            return {'finished': False}
+
 
     def _handle_crash(self, pid: int, info: dict):
         """进程崩溃处理逻辑"""
@@ -203,6 +391,8 @@ class ProcessManager:
             f"命令: {info['command']}\n"
             f"最后错误输出:\n{last_lines}"
         )
+        # 发现进程崩溃,向django汇报任务失败
+        response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={'missionId': info['meta']['mission']['id'], 'planId': info['meta']['plan']['id'], 'progress': -1})
         # 向flask发送进程崩溃信息
         response = requests.post(SCHEDULER_BASE_URL + '/report', json={'missionId': info['meta']['mission']['id'], 'planId': info['meta']['plan']['id'], 'state': 'DEAD', 'results': None})
 

+ 41 - 6
scheduler/scheduler.py

@@ -27,8 +27,8 @@ db = SQLAlchemy(app)
 
 # 创建并启动子进程管理器
 manager = ProcessManager(
-    check_interval=10,
-    timeout=600,
+    # check_interval=1,
+    # timeout=600,
 )
 manager.start_monitoring()
 
@@ -67,19 +67,21 @@ def report():
             logger.info(f"计算任务完成: MissionId:{missionId} PlanId:{planId}")
             # 现有任务完成,搜索下一任务
             nextTasks = store.solveMission(missionId=missionId, planId=planId, results=results)
+            logger.info(f"Next Tasks{nextTasks}")
             for nextTask in nextTasks:
                 task = store.prepareTask(missionId=missionId, planId=nextTask['id'])
                 if manager.spawn(command=task['command'], cwd=task['cwd'], plan=task['plan'], mission=task['mission']):
                     store.addActiveTask(missionId=missionId, planId=task['plan']['id'])
+                    logger.info(f"创建后续计算任务成功 MissionId:{missionId} PlanId:{task['plan']['id']}")
                 else:
-                    logger.error(f"创建计算任务失败 MissionId:{missionId} PlanId:{task['plan']['id']}")
+                    logger.info(f"创建后续计算任务失败 MissionId:{missionId} PlanId:{task['plan']['id']}")
             return jsonify({"code": "OK"})
         else:
             logger.error(f"计算任务无法完成: MissionId:{missionId} PlanId:{planId}")
     # 超时任务处理逻辑待完善
     if state == "DEAD" or state == "TIMEOUT":
         if store.removeActiveTask(missionId=missionId, planId=planId):
-            logger.info(f"移除错误计算任务: MissionId:{missionId} PlanId:{planId}")
+            logger.info(f"移除终止计算任务: MissionId:{missionId} PlanId:{planId}")
             return jsonify({"code": "OK"})
         else:
             logger.error(f"移除错误计算任务失败,已强制清除: MissionId:{missionId} PlanId:{planId}")
@@ -103,8 +105,41 @@ def start_task():
 
         return jsonify({"code": "OK", "status": "started"})
     else:
-        return jsonify({"code": "ERROR", "startus": "duplicated"})
+        return jsonify({"code": "ERROR", "status": "duplicated"})
+
+@app.route('/pauseMission', methods=['POST'])
+def pause_task():
+    mission = request.json['mission']
+    # 暂停mission的方法是停止当前正在执行的算法子进程,但是保留mission任务栈,等待恢复命令
+    # 在store中,mission的task保存了当前正在执行的所有plan,因此并不需要对store做特殊处理
+    plan = manager.stop(missionId=int(mission['id']))
+    if plan:
+        return jsonify({"code": "OK", "status": "paused"})
+    else:
+        return jsonify({"code": "ERROR", "status": "Failed to pause"})
     
+@app.route('/resumeMission', methods=['POST'])
+def resume_task():
+    mission = request.json['mission']
+    resumedTasks = store.resumeMission(missionId=int(mission['id']))
+    for nextTask in resumedTasks:
+        task = store.prepareTask(missionId=int(mission['id']), planId=nextTask['id'])
+        if manager.spawn(command=task['command'], cwd=task['cwd'], plan=task['plan'], mission=task['mission']):
+            logger.info(f"任务 MissionId:{mission['id']} 已恢复")
+        else: logger.info(f"任务 MissionId:{mission['id']} 恢复运行出错")
+    return jsonify({"code": "OK", "status": "resumed"})
+@app.route('/stopMission', methods=['POST'])
+def stop_task():
+    # 被停止的mission将无法恢复,因此可以直接删除其所有上下文消息
+    mission = request.json['mission']
+    plan = manager.stop(missionId=int(mission['id']))
+    if not plan:
+        logger.info(f"停止 MissionId:{mission['id']} 出错,未能停止现有进程")
+    if store.stopMission(missionId=int(mission['id'])):
+        logger.info(f"停止 MissionId:{mission['id']} 成功")
+        return jsonify({"code": "OK", "status": "stopped"})
+    logger.info(f"停止 MissionId:{mission['id']} 失败"); return jsonify({"code": "ERROR", "status": "Failed to stop"})
+
 
 @app.route('/get_status/<task_id>')
 def get_status(task_id):
@@ -112,4 +147,4 @@ def get_status(task_id):
     return jsonify({"status": "running", "progress": 75})
 
 if __name__ == '__main__':
-    app.run(port=5000)
+    app.run(port=5000, debug=False)

+ 22 - 7
scheduler/utils.py

@@ -22,7 +22,7 @@ for algo in algoConfig:
     algoList.append({
         'name': algo[0],
         'path': BASE_DIR / algo[1],
-        'command': algo[2],
+        'command': algo[2].split(' '),
     })
 
 
@@ -57,12 +57,9 @@ class Store:
         return False
 
     def removeActiveTask(self, missionId: int, planId: int):
-        originLen = len(self.activeTasks)
+        logger.info(f"selfACTIVETASL{self.activeTasks}")
         self.activeTasks = [task for task in self.activeTasks if task['missionId'] != missionId and task['planId'] != planId]
-        if originLen != (len(self.activeTasks) + 1):
-            # 没有删除或者删除多于一个?
-            print("没有删除Task或删除多于一个Task")
-            return False
+        logger.info(f"selfACTIVETASL{self.activeTasks}")
         return True
 
     # 添加任务时仅放入第一级待计算plans,后续每个plan完成计算后根据children列表寻找下一个plan继续计算
@@ -146,11 +143,29 @@ class Store:
             return []
         # 返回下一轮需要调用的任务,如果存在并行,则列表长度大于1
         return [plan for plan in mission['plans'] if plan['id'] in children]
+    
+    def resumeMission(self, missionId: int):
+        mission = [m for m in self.missions if m['id'] == missionId]
+        if not mission:
+            logger.info("未找到要恢复的mission")
+            return []
+        mission = mission[0]
+        # 返回所有的tasks;由于暂停时仅在 manager 中停止了进程,而没有在 store 中处理,因此 tasks 中保存的就是此前的运行任务
+        return [plan for plan in mission['plans'] if plan['id'] in mission['tasks']]
             
+    def stopMission(self, missionId: int):
+        mission = [m for m in self.missions if m['id'] == missionId]
+        if not mission:
+            logger.info("未找到要停止的mission")
+            return False
+        # 停止后不需恢复,所以直接摘除该mission即可
+        self.missions = [m for m in self.missions if m['id'] != missionId]
+        return True
+
     def initMissionTasks(self, missionId: int):
         mission = [m for m in self.missions if m['id'] == missionId]
         if not mission:
-            print("未找到mission,是否错误传入更新请求")
+            logger.info("未找到mission,是否错误传入更新请求")
             return []
         else:
             mission = mission[0]

+ 1 - 0
viewer/src/api/axios.js

@@ -52,6 +52,7 @@ export async function getData(url = '', params = {}){
     api.headers = {
         'Content-Type': 'application/json'
     }
+
     const response = await api.get(url, {params: params});
     return response.data;
 }

+ 4 - 5
viewer/src/views/dashoard/calculate.vue

@@ -262,12 +262,11 @@ const createSmartPath = (start, end) => {
 }
 
 const startCalculate = async () => {
-  // 测试用模拟进度100%
-  progress.value.forEach((item, index) => progress.value[index] = 100)
-  return
-
-  
+  // // 测试用模拟进度100%
+  // progress.value.forEach((item, index) => progress.value[index] = 100)
+  // return
 
+  // 真实调用后台数据处理
   const response = await postData('/calculate/', {
     mission: mission.id,
     command: 'start',