Lan 1 month ago
parent
commit
e9a7e01a9e
100 changed files with 3895 additions and 172 deletions
  1. BIN
      backend/api/__pycache__/__init__.cpython-310.pyc
  2. BIN
      backend/api/__pycache__/admin.cpython-310.pyc
  3. BIN
      backend/api/__pycache__/api_alert.cpython-310.pyc
  4. BIN
      backend/api/__pycache__/api_calculate.cpython-310.pyc
  5. BIN
      backend/api/__pycache__/api_graph.cpython-310.pyc
  6. BIN
      backend/api/__pycache__/api_graphicFile.cpython-310.pyc
  7. BIN
      backend/api/__pycache__/api_prepare.cpython-310.pyc
  8. BIN
      backend/api/__pycache__/api_rawDataTrans.cpython-310.pyc
  9. BIN
      backend/api/__pycache__/api_results.cpython-310.pyc
  10. BIN
      backend/api/__pycache__/api_system.cpython-310.pyc
  11. BIN
      backend/api/__pycache__/api_taskFile.cpython-310.pyc
  12. BIN
      backend/api/__pycache__/api_user.cpython-310.pyc
  13. BIN
      backend/api/__pycache__/apps.cpython-310.pyc
  14. BIN
      backend/api/__pycache__/middleware.cpython-310.pyc
  15. BIN
      backend/api/__pycache__/scheduler.cpython-310.pyc
  16. BIN
      backend/api/__pycache__/serializers.cpython-310.pyc
  17. BIN
      backend/api/__pycache__/tokenAuthentication.cpython-310.pyc
  18. BIN
      backend/api/__pycache__/urls.cpython-310.pyc
  19. BIN
      backend/api/__pycache__/utils.cpython-310.pyc
  20. + 15 - 12
      backend/api/api_calculate.py
  21. + 27 - 17
      backend/api/api_graph.py
  22. + 0 - 1
      backend/api/api_graphicFile.py
  23. + 121 - 2
      backend/api/api_prepare.py
  24. + 57 - 33
      backend/api/api_rawDataTrans.py
  25. + 15 - 0
      backend/api/api_results.py
  26. + 3 - 0
      backend/api/api_user.py
  27. + 32 - 0
      backend/api/middleware.py
  28. + 38 - 0
      backend/api/migrations/0022_mission_description_alter_alert_name_and_more.py
  29. + 18 - 0
      backend/api/migrations/0023_file_encrypted.py
  30. + 18 - 0
      backend/api/migrations/0024_file_key.py
  31. + 18 - 0
      backend/api/migrations/0025_user_session_key.py
  32. BIN
      backend/api/migrations/__pycache__/0001_initial.cpython-310.pyc
  33. BIN
      backend/api/migrations/__pycache__/0002_alter_user_options_user_last_login.cpython-310.pyc
  34. BIN
      backend/api/migrations/__pycache__/0003_view_file.cpython-310.pyc
  35. BIN
      backend/api/migrations/__pycache__/0004_rename_display_name_user_displayname_file_usage.cpython-310.pyc
  36. BIN
      backend/api/migrations/__pycache__/0005_file_associate_file_content.cpython-310.pyc
  37. BIN
      backend/api/migrations/__pycache__/0006_alter_file_associate.cpython-310.pyc
  38. BIN
      backend/api/migrations/__pycache__/0007_fileinfo.cpython-310.pyc
  39. BIN
      backend/api/migrations/__pycache__/0008_mission_result.cpython-310.pyc
  40. BIN
      backend/api/migrations/__pycache__/0009_alter_fileinfo_file_alter_mission_name.cpython-310.pyc
  41. BIN
      backend/api/migrations/__pycache__/0010_algorithm_plan.cpython-310.pyc
  42. BIN
      backend/api/migrations/__pycache__/0011_result_plan_result_state.cpython-310.pyc
  43. BIN
      backend/api/migrations/__pycache__/0012_result_edgefile_result_nodefile_alter_result_plan.cpython-310.pyc
  44. BIN
      backend/api/migrations/__pycache__/0013_remove_result_state_alter_file_usage.cpython-310.pyc
  45. BIN
      backend/api/migrations/__pycache__/0014_result_progress.cpython-310.pyc
  46. BIN
      backend/api/migrations/__pycache__/0015_mission_state.cpython-310.pyc
  47. BIN
      backend/api/migrations/__pycache__/0016_alter_result_edgefile_alter_result_nodefile.cpython-310.pyc
  48. BIN
      backend/api/migrations/__pycache__/0017_graph_graphtoken.cpython-310.pyc
  49. BIN
      backend/api/migrations/__pycache__/0018_rename_edgemap_graph_edges_and_more.cpython-310.pyc
  50. BIN
      backend/api/migrations/__pycache__/0019_alter_graph_user.cpython-310.pyc
  51. BIN
      backend/api/migrations/__pycache__/0020_alert_systemperformance.cpython-310.pyc
  52. BIN
      backend/api/migrations/__pycache__/0021_rename_indicator_alert_metric.cpython-310.pyc
  53. BIN
      backend/api/migrations/__pycache__/0022_mission_description_alter_alert_name_and_more.cpython-310.pyc
  54. BIN
      backend/api/migrations/__pycache__/0023_file_encrypted.cpython-310.pyc
  55. BIN
      backend/api/migrations/__pycache__/0024_file_key.cpython-310.pyc
  56. BIN
      backend/api/migrations/__pycache__/0025_user_session_key.cpython-310.pyc
  57. BIN
      backend/api/migrations/__pycache__/__init__.cpython-310.pyc
  58. BIN
      backend/api/models/__pycache__/__init__.cpython-310.pyc
  59. BIN
      backend/api/models/__pycache__/alert.cpython-310.pyc
  60. BIN
      backend/api/models/__pycache__/algorithm.cpython-310.pyc
  61. BIN
      backend/api/models/__pycache__/file.cpython-310.pyc
  62. BIN
      backend/api/models/__pycache__/graph.cpython-310.pyc
  63. BIN
      backend/api/models/__pycache__/mission.cpython-310.pyc
  64. BIN
      backend/api/models/__pycache__/plan.cpython-310.pyc
  65. BIN
      backend/api/models/__pycache__/result.cpython-310.pyc
  66. BIN
      backend/api/models/__pycache__/system.cpython-310.pyc
  67. BIN
      backend/api/models/__pycache__/user.cpython-310.pyc
  68. BIN
      backend/api/models/__pycache__/view.cpython-310.pyc
  69. + 238 - 21
      backend/api/models/file.py
  70. + 46 - 19
      backend/api/models/graph.py
  71. + 1 - 0
      backend/api/models/mission.py
  72. + 2 - 1
      backend/api/models/user.py
  73. + 12 - 7
      backend/api/tokenAuthentication.py
  74. + 5 - 1
      backend/api/urls.py
  75. BIN
      backend/backend/__pycache__/__init__.cpython-310.pyc
  76. BIN
      backend/backend/__pycache__/settings.cpython-310.pyc
  77. BIN
      backend/backend/__pycache__/urls.cpython-310.pyc
  78. BIN
      backend/backend/__pycache__/wsgi.cpython-310.pyc
  79. + 1 - 0
      backend/backend/settings.py
  80. BIN
      backend/db.sqlite3
  81. BIN
      scheduler/__pycache__/processManager.cpython-310.pyc
  82. BIN
      scheduler/__pycache__/processManager.cpython-38.pyc
  83. BIN
      scheduler/__pycache__/utils.cpython-310.pyc
  84. BIN
      scheduler/__pycache__/utils.cpython-38.pyc
  85. + 96 - 0
      scheduler/algo1Folder/AUC.py
  86. BIN
      scheduler/algo1Folder/__pycache__/AUC.cpython-38.pyc
  87. + 93 - 0
      scheduler/algo1Folder/controller-back.py
  88. + 904 - 58
      scheduler/algo1Folder/controller.py
  89. + 336 - 0
      scheduler/algo1Folder/edges.csv
  90. + 180 - 0
      scheduler/algo1Folder/nodes.csv
  91. + 0 - 0
      scheduler/algo1Folder/丁瑞华-拓扑优化
  92. BIN
      scheduler/algo1Folder/测试输出/边集(教师模型测试).xlsx
  93. BIN
      scheduler/algo1Folder/测试输出/边集(教师测试删除).xlsx
  94. BIN
      scheduler/algo1Folder/测试输出/边集(教师预测).xlsx
  95. + 646 - 0
      scheduler/algo2Folder/controller.py
  96. + 2 - 0
      scheduler/algo2Folder/亢靖-功能体探测-层级关系
  97. + 96 - 0
      scheduler/algo3Folder/AUC.py
  98. BIN
      scheduler/algo3Folder/__pycache__/AUC.cpython-38.pyc
  99. + 873 - 0
      scheduler/algo3Folder/controller.py
  100. + 2 - 0
      scheduler/algo3Folder/姚亚林-链路预测

BIN
backend/api/__pycache__/__init__.cpython-310.pyc


BIN
backend/api/__pycache__/admin.cpython-310.pyc


BIN
backend/api/__pycache__/api_alert.cpython-310.pyc


BIN
backend/api/__pycache__/api_calculate.cpython-310.pyc


BIN
backend/api/__pycache__/api_graph.cpython-310.pyc


BIN
backend/api/__pycache__/api_graphicFile.cpython-310.pyc


BIN
backend/api/__pycache__/api_prepare.cpython-310.pyc


BIN
backend/api/__pycache__/api_rawDataTrans.cpython-310.pyc


BIN
backend/api/__pycache__/api_results.cpython-310.pyc


BIN
backend/api/__pycache__/api_system.cpython-310.pyc


BIN
backend/api/__pycache__/api_taskFile.cpython-310.pyc


BIN
backend/api/__pycache__/api_user.cpython-310.pyc


BIN
backend/api/__pycache__/apps.cpython-310.pyc


BIN
backend/api/__pycache__/middleware.cpython-310.pyc


BIN
backend/api/__pycache__/scheduler.cpython-310.pyc


BIN
backend/api/__pycache__/serializers.cpython-310.pyc


BIN
backend/api/__pycache__/tokenAuthentication.cpython-310.pyc


BIN
backend/api/__pycache__/urls.cpython-310.pyc


BIN
backend/api/__pycache__/utils.cpython-310.pyc


+ 15 - 12
backend/api/api_calculate.py

@@ -44,8 +44,10 @@ class CalculateAPI(APIView):
         return success(message="任务已完成")
     else:
      # Commands other than start must first check whether the mission has begun
-      if not mission.state == 'calculating':
-        return failed(message="任务没有在运行,无法暂停或停止")
+      if mission.state != 'calculating' and command == 'pause':
+        return failed(message="任务没有在运行,无法暂停")
+      if mission.state not in ('calculating', 'pause') and command == 'stop':
+        return failed(message="任务没有在运行或暂停,无法停止")
     # Submit the calculation task to the scheduler
     # mission = request.json['mission']
     # plans = request.json['plans']
@@ -80,11 +82,11 @@ class CalculateAPI(APIView):
             parentPlan = currentPlan.parent
             # If the parent plan is the root, use the mission's own data as input
             if parentPlan.parent == None:
-              nodesJson = mission.nodeFile.toJson()
-              edgesJson = mission.edgeFile.toJson()
+              nodesJson = mission.nodeFile.toJson(request)
+              edgesJson = mission.edgeFile.toJson(request)
             else:
-              nodesJson = parentPlan.own_result.nodeFile.toJson()
-              edgesJson = parentPlan.own_result.edgeFile.toJson()
+              nodesJson = parentPlan.own_result.nodeFile.toJson(request)
+              edgesJson = parentPlan.own_result.edgeFile.toJson(request)
             latestPlans.append(currentPlan)
             calculateData['plans'].append({
               'id': currentPlan.id,
@@ -121,8 +123,8 @@ class CalculateAPI(APIView):
       # If this is not a resumed task, compute normally
       calculateData['plans'].append({
         'id': rootPlan.id,
-        'nodes': mission.nodeFile.toJson(),
-        'edges': mission.edgeFile.toJson(),
+        'nodes': mission.nodeFile.toJson(request),
+        'edges': mission.edgeFile.toJson(request),
         'children': list(mission.own_plans.filter(parent=rootPlan).values_list('id', flat=True)),
       })
 
@@ -137,8 +139,8 @@ class CalculateAPI(APIView):
             calculateData['plans'].append({
               'id': p.id,
               'algorithm': p.algorithm.name,
-              'nodes': p.parent.own_result.nodeFile.toJson(),
-              'edges': p.parent.own_result.edgeFile.toJson(),
+              'nodes': p.parent.own_result.nodeFile.toJson(request),
+              'edges': p.parent.own_result.edgeFile.toJson(request),
               'children': [child.id for child in children],
             })
           else:
@@ -150,6 +152,7 @@ class CalculateAPI(APIView):
               'children': [child.id for child in children],
             })
           rootPlans.extend(children)
+      # logger.warning(calculateData)
       response = requests.post(SCHEDULER_BASE_URL + '/addMission', json=calculateData)
       print(response.json())
       if response.json()['code'] == 'OK':
@@ -201,7 +204,7 @@ class CalculateAPI(APIView):
           if hasattr(plan, 'own_result'):
             result = plan.own_result
             result.delete()
-        return success(message="暂停计算任务成功")
+        return success(message="暂停/停止计算任务成功")
       else:
         print(response)
-        return failed(message="停计算任务失败", data=response)
+        return failed(message="暂停/停止计算任务失败", data=response)
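The pause/stop guards above implement a small state machine over mission.state. A minimal standalone sketch of the rule being enforced (the helper and names below are illustrative, not part of this commit):

# Hypothetical helper mirroring the checks in CalculateAPI above.
ALLOWED_STATES = {
    'pause': {'calculating'},          # only a running mission can be paused
    'stop': {'calculating', 'pause'},  # a running or paused mission can be stopped
}

def can_apply(command, state):
    # commands other than pause/stop (e.g. start) are validated elsewhere
    return state in ALLOWED_STATES.get(command, set())

assert can_apply('stop', 'pause')
assert not can_apply('pause', 'init')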

+ 27 - 17
backend/api/api_graph.py

@@ -12,9 +12,11 @@ from django.contrib.auth import login
 from api.utils import *
 from api.models import Result, Graph, GraphToken, Plan
 from random import randint
-
+import logging
 import json, csv
 
+logger = logging.getLogger("graph")
+
 class ViewGraphByToken(APIView):
     # View a graph via a one-time token (used by the VR client)
     authentication_classes = []
@@ -47,7 +49,11 @@ class GenerateGraph(APIView):
         except Plan.DoesNotExist:
             print("获取结果的Plan失败")
             return failed(message="无法找到该结果对应规划")
-        result = plan.own_result
+        try:
+            result = plan.own_result
+        except Result.DoesNotExist:
+            logger.error(f"获取任务{plan.mission.id}规划{plan.id}的结果失败,结果不存在")
+            return failed(message="结果不存在")
 
         # Chart display generates no graph; only statistics are computed
         if method == 'chart':
@@ -184,19 +190,19 @@ class GenerateGraph(APIView):
                 return success(data={
                     'token': token,
                 })
+        # If no graph exists yet, regenerate it
         else:
             nodeJson = result.nodeFile.toJson()
             edgeJson = result.edgeFile.toJson()
 
-
###### For testing: add a few labels
-            for node in nodeJson:
-                node['meta'].append({'optimize': 'new'})
-                node['meta'].append({'group': randint(1,5)})
-                node['meta'].append({'predict': 'new'})
-            for edge in edgeJson:
-                edge['meta'].append({'optimize': 'new'})
-                edge['meta'].append({'predict': 'new'})
+            # for node in nodeJson:
+            #     node['meta'].append({'optimize': 'new'})
+            #     node['meta'].append({'group': randint(1,5)})
+            #     node['meta'].append({'predict': 'new'})
+            # for edge in edgeJson:
+            #     edge['meta'].append({'optimize': 'new'})
+            #     edge['meta'].append({'predict': 'new'})
 ########################
 
 
@@ -229,7 +235,10 @@ class GenerateGraph(APIView):
                         if 'group' in meta and type(meta['group']) == int:
                             missingLabel = False
                     if missingLabel:
-                        return failed(message="无功能体标签")
+                        # A node without a functional-group label is treated as isolated
+                        # Isolated nodes get group -1 to mark them as such
+                        node['meta'].append({'group': -1})
+                        # return failed(message="无功能体标签")
                 for edge in edgeJson:
                     if not 'from' in edge or not 'to' in edge:
                         return failed(message="边文件存在问题")
@@ -240,12 +249,13 @@ class GenerateGraph(APIView):
                     if not 'id' in node or not 'type' in node:
                         return failed(message="节点文件存在问题")
                     # For the evolution-prediction algorithm, node attributes should distinguish new from old
-                    missingLabel = True
-                    for meta in node['meta']:
-                        if 'predict' in meta and meta['predict'] in ['new', 'old']:
-                            missingLabel = False
-                    if missingLabel:
-                        return failed(message="无演化预测标签")
+                    # Evolution-prediction and topology-optimization nodes carry no meta attribute themselves; only edges are distinguished
+                    # missingLabel = True
+                    # for meta in node['meta']:
+                    #     if 'predict' in meta and meta['predict'] in ['new', 'old']:
+                    #         missingLabel = False
+                    # if missingLabel:
+                    #     return failed(message="无演化预测标签")
                 for edge in edgeJson:                 
                     if not 'from' in edge or not 'to' in edge:
                         return failed(message="边文件存在问题")

+ 0 - 1
backend/api/api_graphicFile.py

@@ -8,7 +8,6 @@ from api.utils import *
 class GraphicSelectAPI(APIView):
     def get(self , request):
         user = request.user
-        print(user)
         graphics = []
         if(user.identity == 'admin'):
             graphiclist = Graph.objects.all()

+ 121 - 2
backend/api/api_prepare.py

@@ -29,6 +29,8 @@ class UploadFileAPI(APIView):
     def post(self, request):
         user = request.user
         # Fetch the uploaded file objects
+        missionName = request.data.get('missionName')
+        missionDescription = request.data.get('missionDescription')
         nodeFileName = request.data.get('nodeFileName')
         edgeFileName = request.data.get('edgeFileName')
         nodeFile = request.data.get('nodes')
@@ -94,6 +96,10 @@ class UploadFileAPI(APIView):
             mission.nodeFile = file
             mission.edgeFile = pre_file
             mission.user = user
+            if missionName:
+                mission.name = missionName
+            if missionDescription:
+                mission.description = missionDescription
             mission.save()
             successUploadedFiles.append({
                 "id": mission.id,
@@ -134,6 +140,8 @@ class UploadFileAPI(APIView):
 class InputFileAPI(APIView):
     def post(self, request):
         user = request.user
+        missionName = request.data.get('missionName')
+        missionDescription = request.data.get('missionDescription')
         nodes = request.data.get('nodes')
         edges = request.data.get('edges')
         logger.info(nodes)
@@ -146,7 +154,9 @@ class InputFileAPI(APIView):
             for node in nodes:
                 nodeList = [int(node['id']), str(node['type'])]
                 for key in [k for k in node if k not in ['id', 'type']]:
-                    nodeList.append(str(key) + ':' +str(node[key]))
+                    # Only keep non-empty values; the front end sends blanks when no node name was entered
+                    if node[key]:
+                        nodeList.append(str(key) + ':' +str(node[key]))
                 nodesCsvList.append(nodeList)
             logger.info(nodesCsvList)
             nodesFile.generate(nodesCsvList)
@@ -187,6 +197,10 @@ class InputFileAPI(APIView):
             mission.edgeFile = edgesFile
             mission.user = user
             mission.state = 'init'
+            if missionName != '':
+                mission.name = missionName
+            if missionDescription != '':
+                mission.description = missionDescription
             mission.save()
             successUploadedFiles.append({
                 "id": mission.id,
@@ -214,6 +228,89 @@ class InputFileAPI(APIView):
             return failed(message="节点和边信息输入失败,节点或边信息未通过合法性检测")
 
 
+class DownloadFileAPI(APIView):
+    def get(self, request):
+        user = request.user
+        fileId = request.GET.get('fileId')
+        try:
+            file = File.objects.get(id=fileId)
+        except File.DoesNotExist:
+            return failed(message="文件不存在")
+        if file.user != user:
+            return failed(message="非本用户上传文件")
+        response = file.download()
+        if response:
+            return response
+        else:
+            return failed(message="下载文件失败")
+
+class EncryptFileAPI(APIView):
+    def get(self, request):
+        missionId = request.GET.get('missionId')
+        try:
+            mission = Mission.objects.get(id=missionId)
+        except Mission.DoesNotExist:
+            logger.error(f"解密任务{missionId}失败,未找到该任务")
+            return failed(message="任务不存在")
+        if mission.nodeFile.encrypted:
+            response.append({
+                'id': mission.nodeFile.id,
+                'content': 'node',
+                'name': mission.nodeFile.name,
+            })
+        if mission.edgeFile.encrypted:
+            response.append({
+                'id': mission.edgeFile.id,
+                'content': 'edge',
+                'name': mission.edgeFile.name,
+            })
+        return success(data=response)
+    
+    def post(self, request):
+        user = request.user
+        password = request.data.get('password')
+        fileId = request.data.get('fileId')
+        action = request.data.get('action')
+        try:
+            file = File.objects.get(id=fileId)
+        except File.DoesNotExist:
+            return failed(message="文件不存在")
+        if file.user != user:
+            return failed(message="非本用户上传文件")
+        if action == 'encrypt':
+            # Encrypt
+            if not password:
+                return failed(message="未提供加密密钥")
+            if file.encrypt(password):
+                return success(message="加密成功")
+            else:
+                return failed(message="加密失败")
+        elif action == 'decrypt':
+            # Decrypt
+            if not password:
+                return failed(message="未提供解密密钥")
+            if file.decrypted(password):
+                return success(message="解密成功")
+            else:
+                return failed(message="密码错误")
+        elif action == 'verify':
+            # Verify
+            if not password:
+                return failed(message="未提供验证密钥")
+            if file.verify(password):
+                # Update the session by direct assignment
+                encrypt_keys = request.session.get('encrypt-keys', {})
+                encrypt_keys[str(file.id)] = password
+                request.session['encrypt-keys'] = encrypt_keys  # overwrite the whole mapping
+                request.session.modified = True  # force-mark the session as modified
+                return success(message="验证成功")
+            else:
+                return failed(message="验证失败")
+        # None of the supported actions matched
+        else:
+            return failed(message="不支持的文件加、解密行为")
+
+
 class PlanAPI(APIView):
     def get(self, request):
         user = request.user
@@ -222,6 +319,12 @@ class PlanAPI(APIView):
         except Mission.DoesNotExist:
             logger.error(f"处理规划所属任务不存在{request.GET.get('mission')}")
             return failed(message="未找到规划任务所属任务")
+        # Check whether the mission has encrypted files; if so, look for decryption keys in the session
+        for file in [mission.nodeFile, mission.edgeFile]:
+            if file.encrypted:
+                # The file is encrypted: look up its key (if any) and verify it
+                if not file.verify(request.session.get('encrypt-keys', {}).get(str(file.id), '')):
+                    return failed(message="存在加密文件,且未通过解密验证")
         plans = mission.own_plans.all()
         response = []
         for plan in plans:
@@ -324,10 +427,13 @@ class MissionsAPI(APIView):
         for mission in missions:
             nodesInfo = mission.nodeFile.own_file_info
             edgesInfo = mission.edgeFile.own_file_info
+            encrypted = mission.nodeFile.encrypted or mission.edgeFile.encrypted
             response.append({
                 'id': mission.id,
                 'name': mission.name,
+                'description': mission.description,
                 'createTime': mission.create_time,
+                'encrypted': encrypted,
                 'nodesInfo': {
                     'S': nodesInfo.sNodes,
                     'D': nodesInfo.dNodes,
@@ -340,4 +446,17 @@ class MissionsAPI(APIView):
                 'status': mission.state,
                 'state': mission.state,
             })
-        return success(data=response)
+        return success(data=response)
+    
+
+class MissionStatusAPI(APIView):
+    def get(self, request):
+        user = request.user
+        missionId = int(request.GET.get('missionId'))
+        try:
+            mission = Mission.objects.get(id=missionId)
+        except Mission.DoesNotExist:
+            logger.error(f"获取任务状态失败,任务{missionId}不存在")
+            return failed(message="获取任务状态失败,任务不存在")
+        return success(data={'status': mission.state})
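For reference, a hypothetical client-side sketch of the encrypt/verify flow added above (host, token, and ids are placeholders; the /encryptfile/ route comes from the urls.py change later in this commit):

import requests

BASE = 'http://localhost:8000'                     # placeholder host
HEADERS = {'Authorization': 'Token <your-token>'}  # placeholder token

# Encrypt an uploaded file, then verify the password so the session
# (restored by TokenSessionMiddleware) caches it under 'encrypt-keys'.
requests.post(f'{BASE}/encryptfile/', headers=HEADERS,
              json={'fileId': 42, 'action': 'encrypt', 'password': 's3cret'})
requests.post(f'{BASE}/encryptfile/', headers=HEADERS,
              json={'fileId': 42, 'action': 'verify', 'password': 's3cret'})

# List which of a mission's files are currently encrypted.
r = requests.get(f'{BASE}/encryptfile/', headers=HEADERS, params={'missionId': 7})
print(r.json())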
+    

+ 57 - 33
backend/api/api_rawDataTrans.py

@@ -36,18 +36,26 @@ class RawDataTrans(APIView):
     def post(self, request):
         # Handle computation results reported back by worker processes
         progress_result = request.data.get('result')
-        logger.info(progress_result)
         performance = request.data.get('performance')
-
+        # logger.info(progress_result)
         for progress_data in progress_result:
             mission = Mission.objects.get(id=int(progress_data['missionId']))
+            # Receiving feedback means this mission has definitely started computing
+            # Note: if the mission state is not init it was already changed elsewhere, so do not modify it here
+            if mission.state == 'init':
+                mission.state = 'calculating'
+                mission.save()
             plan = Plan.objects.get(id=int(progress_data['planId']))
             if 'nodes' in progress_data:
-                nodes = progress_data['nodes']
+                nodes = []
+                for node in progress_data['nodes']:
+                    nodes.append([node['id'], node['type'], node['meta']])
             else:
                 nodes = None
             if 'edges' in progress_data:
-                edges = progress_data['edges']
+                edges = []
+                for edge in progress_data['edges']:
+                    edges.append([edge['from'], edge['to'], edge['meta']])
             else:
                 edges = None
             progress = progress_data['progress']
@@ -62,35 +70,6 @@ class RawDataTrans(APIView):
                     return failed(message="缺少结果参数")
             try:
                 result = plan.own_result
-                if int(progress) == 100:
-                    # Once the task completes, persist the result files
-                    # Read nodes and edges and generate the result files
-                    nodeFile = File(type='csv', usage='result', content='node', user=plan.user)
-                    nodeFile.save()
-                    if not nodeFile.generate(nodes) == OK:
-                        logger.error("保存计算结果文件失败")
-                        return failed(message="保存节点结果文件失败")
-                    edgeFile = File(type='csv', usage='result', content='edge', user=plan.user)
-                    edgeFile.save()
-                    if not edgeFile.generate(edges) == OK:
-                        logger.error("保存计算结果文件失败")
-                        return failed(message="保存边结果文件失败")
-                    nodeFile.associate = edgeFile
-                    edgeFile.associate = nodeFile
-                    nodeFile.save()
-                    edgeFile.save()
-
-                    # Bind the files to the result
-                    result.nodeFile = nodeFile
-                    result.edgeFile = edgeFile
-                    result.progress = 100
-                    result.save()
-
-                else:
-                    # 进度不到百分百,正在执行中,仅更新进度数值
-                    # 注意使用负数进度值表示单个处理失败或整个任务失败
-                    result.progress = int(progress)
-                    result.save()
             except Result.DoesNotExist:
                 # No result exists yet; create one
                 result = Result()
@@ -99,6 +78,51 @@ class RawDataTrans(APIView):
                 result.user = plan.user
                 result.progress = int(progress)
                 result.save()
+            if int(progress) == 100:
+                # Once the task completes, persist the result files
+                # Read nodes and edges and generate the result files
+                nodeFile = File(type='csv', usage='result', content='node', user=plan.user)
+                nodeFile.save()
+                if not nodeFile.generate(nodes) == OK:
+                    logger.error("保存计算结果文件失败")
+                    return failed(message="保存节点结果文件失败")
+                edgeFile = File(type='csv', usage='result', content='edge', user=plan.user)
+                edgeFile.save()
+                if not edgeFile.generate(edges) == OK:
+                    logger.error("保存计算结果文件失败")
+                    return failed(message="保存边结果文件失败")
+                nodeFile.associate = edgeFile
+                edgeFile.associate = nodeFile
+                nodeFile.save()
+                edgeFile.save()
+
+                # Bind the files to the result
+                result.nodeFile = nodeFile
+                result.edgeFile = edgeFile
+                result.progress = 100
+                result.save()
+
+                # Check whether all plans under this mission have finished
+                missionCompleted = True
+                for p in mission.own_plans.all():
+                    try:
+                        r = p.own_result
+                        if not r.progress == 100:
+                            missionCompleted = False
+                    except Result.DoesNotExist:
+                        # A plan without a result means the mission cannot be finished
+                        missionCompleted = False
+                        break
+                if missionCompleted:
+                    logger.error("检测认为mission已结束")
+                    mission.state = 'done'
+                    mission.save()
+            else:
+                # Progress below 100 means still running; only update the progress value
+                # Note: negative progress values signal a single failed step or a failed mission
+                result.progress = int(progress)
+                result.save()
+            
         
         # Compute system resource-usage information
         system_performance = performance['system']
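Reconstructed from the parsing code above, the scheduler feedback this view consumes looks roughly like the following (values are illustrative; the system performance fields are not shown in this hunk):

payload = {
    'result': [{
        'missionId': 1,
        'planId': 3,
        'progress': 100,  # negative values signal a failed step or mission
        'nodes': [{'id': 0, 'type': 'S', 'meta': []}],  # present with results
        'edges': [{'from': 0, 'to': 1, 'meta': []}],
    }],
    'performance': {'system': {}},  # fields omitted here
}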

+ 15 - 0
backend/api/api_results.py

@@ -27,21 +27,36 @@ class Results(APIView):
         except Mission.DoesNotExist:
             return failed(message="任务不存在")
         resultsData = []
+        # TODO: is a completion check needed here? rawDataTrans checks too; this one only matters when the scheduler fails to report results correctly
+        missionCompleted = True
         for plan in mission.own_plans.all():
             try:
                 if not plan.parent:
                     # The root node is not a real plan and has no result
                     continue
                 result = plan.own_result
+                if not result.progress == 100:
+                    missionCompleted = False
                 resultsData.append({
                     'planId': plan.id,
                     'progress': result.progress,
                     'resultId': result.id,
                 })
             except Result.DoesNotExist:
+                # A plan without a result means the mission is not finished yet
+                missionCompleted = False
                 resultsData.append({
                     'planId': plan.id,
                     'progress': 0,
                     'resultId': 0,
                 })
+        # All plans under the mission have completed
+        if missionCompleted:
+            mission.state = 'done'
+            mission.save()
+
+        resultsData.append({
+            'missionId': mission.id,
+            'status': mission.state,
+        })
         return success(data=resultsData)
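After this change the data returned by Results.get mixes per-plan progress entries with one trailing mission-status entry, roughly (illustrative values):

resultsData = [
    {'planId': 4, 'progress': 100, 'resultId': 9},
    {'planId': 5, 'progress': 40, 'resultId': 10},
    {'missionId': 2, 'status': 'calculating'},  # appended after the plan entries
]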

+ 3 - 0
backend/api/api_user.py

@@ -68,6 +68,9 @@ class UserLoginAPI(APIView):
                 token = Token.objects.get(user=user).key
             else:
                 token = Token.objects.create(user=user).key
+            request.session.create()  
+            user.session_key = request.session.session_key
+            user.save()
             return success(message="登录成功", data={
                 'username': user.username,
                 'displayName': user.displayname,

+ 32 - 0
backend/api/middleware.py

@@ -0,0 +1,32 @@
+# middleware.py
+from django.contrib.sessions.backends.db import SessionStore
+from rest_framework.authtoken.models import Token
+
+class TokenSessionMiddleware:
+    def __init__(self, get_response):
+        self.get_response = get_response
+
+    def __call__(self, request):
+        # Bind the session before the request is processed
+        if 'Authorization' in request.headers:
+            auth_header = request.headers['Authorization']
+            if auth_header.startswith('Token '):
+                token_key = auth_header.split(' ')[1]
+                try:
+                    user = Token.objects.get(key=token_key).user
+                    if user.session_key:
+                        # Create a session instance and load its data
+                        session = SessionStore(session_key=user.session_key)
+                        session.load()
+                        request.session = session
+                except (Token.DoesNotExist, AttributeError):
+                    pass
+
+        # Continue processing the request (including the view)
+        response = self.get_response(request)
+        
+        # Optional save step during the response phase
+        if hasattr(request, 'session') and request.session.modified:
+            request.session.save()
+        
+        return response
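A quick sketch of exercising the middleware with Django's test client (assumes a user whose session_key was saved at login and an existing DRF token; the path and token are placeholders):

from django.test import Client

client = Client()
# Only the token is sent, no session cookie. The middleware resolves the token
# to a user, loads SessionStore(user.session_key), and attaches it to the
# request, so session data such as 'encrypt-keys' survives cookie-less calls.
resp = client.get('/plan/', {'mission': 1}, HTTP_AUTHORIZATION='Token <key>')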

+ 38 - 0
backend/api/migrations/0022_mission_description_alter_alert_name_and_more.py

@@ -0,0 +1,38 @@
+# Generated by Django 4.2 on 2025-05-12 00:33
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("api", "0021_rename_indicator_alert_metric"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="mission",
+            name="description",
+            field=models.CharField(default="无描述", max_length=128),
+        ),
+        migrations.AlterField(
+            model_name="alert",
+            name="name",
+            field=models.CharField(default="未命名告警规则", max_length=64, unique=True),
+        ),
+        migrations.AlterField(
+            model_name="mission",
+            name="state",
+            field=models.CharField(
+                choices=[
+                    ("init", "init"),
+                    ("calculating", "calculating"),
+                    ("pause", "pause"),
+                    ("stop", "stop"),
+                    ("done", "done"),
+                ],
+                default="init",
+                max_length=32,
+            ),
+        ),
+    ]

+ 18 - 0
backend/api/migrations/0023_file_encrypted.py

@@ -0,0 +1,18 @@
+# Generated by Django 4.2 on 2025-05-12 03:10
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("api", "0022_mission_description_alter_alert_name_and_more"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="file",
+            name="encrypted",
+            field=models.BooleanField(default=False),
+        ),
+    ]

+ 18 - 0
backend/api/migrations/0024_file_key.py

@@ -0,0 +1,18 @@
+# Generated by Django 4.2 on 2025-05-12 09:15
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("api", "0023_file_encrypted"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="file",
+            name="key",
+            field=models.CharField(blank=True, max_length=128, null=True),
+        ),
+    ]

+ 18 - 0
backend/api/migrations/0025_user_session_key.py

@@ -0,0 +1,18 @@
+# Generated by Django 4.2 on 2025-05-12 12:31
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("api", "0024_file_key"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="user",
+            name="session_key",
+            field=models.CharField(blank=True, max_length=40),
+        ),
+    ]

BIN
backend/api/migrations/__pycache__/0001_initial.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0002_alter_user_options_user_last_login.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0003_view_file.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0004_rename_display_name_user_displayname_file_usage.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0005_file_associate_file_content.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0006_alter_file_associate.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0007_fileinfo.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0008_mission_result.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0009_alter_fileinfo_file_alter_mission_name.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0010_algorithm_plan.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0011_result_plan_result_state.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0012_result_edgefile_result_nodefile_alter_result_plan.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0013_remove_result_state_alter_file_usage.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0014_result_progress.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0015_mission_state.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0016_alter_result_edgefile_alter_result_nodefile.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0017_graph_graphtoken.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0018_rename_edgemap_graph_edges_and_more.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0019_alter_graph_user.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0020_alert_systemperformance.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0021_rename_indicator_alert_metric.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0022_mission_description_alter_alert_name_and_more.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0023_file_encrypted.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0024_file_key.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/0025_user_session_key.cpython-310.pyc


BIN
backend/api/migrations/__pycache__/__init__.cpython-310.pyc


BIN
backend/api/models/__pycache__/__init__.cpython-310.pyc


BIN
backend/api/models/__pycache__/alert.cpython-310.pyc


BIN
backend/api/models/__pycache__/algorithm.cpython-310.pyc


BIN
backend/api/models/__pycache__/file.cpython-310.pyc


BIN
backend/api/models/__pycache__/graph.cpython-310.pyc


BIN
backend/api/models/__pycache__/mission.cpython-310.pyc


BIN
backend/api/models/__pycache__/plan.cpython-310.pyc


BIN
backend/api/models/__pycache__/result.cpython-310.pyc


BIN
backend/api/models/__pycache__/system.cpython-310.pyc


BIN
backend/api/models/__pycache__/user.cpython-310.pyc


BIN
backend/api/models/__pycache__/view.cpython-310.pyc


+ 238 - 21
backend/api/models/file.py

@@ -5,6 +5,14 @@ from api.utils import *
 import json
 from random import randint
 import logging
+from django.http import FileResponse
+from Crypto.Cipher import ARC4
+from Crypto.Protocol.KDF import PBKDF2
+from Crypto.Hash import SHA512
+from django.contrib.auth.hashers import make_password
+from io import TextIOWrapper, BytesIO
+from ast import literal_eval
+from typing import Any
 
 types = [
     ('csv', 'csv'),
@@ -24,6 +32,49 @@ contents = [
 
 logger = logging.getLogger("file-model")
 
+# Salt value
+salt = "vrServer"
+# Encryption
+def rc4_encrypt(key: bytes, data: bytes) -> bytes:
+    cipher = ARC4.new(key)
+    cipher.encrypt(b'\x00' * 1024)  # discard the first 1024 bytes of keystream
+    return cipher.encrypt(data)
+# Decryption (RC4 is symmetric)
+def rc4_decrypt(key: bytes, data: bytes) -> bytes:
+    return rc4_encrypt(key, data)
+# Derive a key from a password
+def derive_key(password: str, salt: bytes, iterations: int) -> bytes:
+    return PBKDF2(
+        password.encode('utf-8'),
+        salt,
+        dkLen=32,  # derive a 256-bit key
+        count=iterations,
+        hmac_hash_module=SHA512
+    )
+# Safely parse JSON
+def safe_json_parse(json_str: str, default: Any = None) -> Any:
+    # Handle empty strings up front
+    stripped_str = json_str.strip()
+    if not stripped_str:
+        return default if default is not None else []
+
+    try:
+        data = json.loads(stripped_str)
+    except json.JSONDecodeError:
+        return default if default is not None else []
+
+    # Recursively detect nested empty lists
+    def is_empty_nested_list(obj):
+        if isinstance(obj, list):
+            return all(is_empty_nested_list(item) for item in obj)
+        return False
+
+    # Return the default for an empty or nested-empty list
+    if data == [] or is_empty_nested_list(data):
+        return default if default is not None else []
+    
+    return data
+
 class FileManager(models.Manager):
     def getHistory(self, user):
         # try:
@@ -31,8 +82,17 @@ class FileManager(models.Manager):
         history = []
         for file in files:
             fileId = file.id
+            if file.content == "node" and not file.own_missions_node.exists():
+                # 输入的节点文件没有对应的任务,应该删除
+                file.delete()
+                continue
+            if file.content == "edge" and not file.own_missions_edge.exists():
+                # 输入的边文件没有对应的任务,应该删除
+                continue
+                    
             directory = os.path.join(BASE_FILE_PATH, str(user.id))
             path = os.path.join(directory, str(fileId))
+            
             try:
                 size = os.path.getsize(path)
             except FileNotFoundError:
@@ -70,6 +130,7 @@ class FileManager(models.Manager):
                 'name': file.name,
                 'uploadTime': file.update_time,
                 'size': size,
+                'encrypted': file.encrypted,
                 'content': file.content,
                 'missions': [{'id': mission.id, 'name': mission.name} for mission in missions],
                 'fileInfo': fileInfo,
@@ -89,12 +150,123 @@ class File(models.Model):
     create_time = models.DateTimeField(auto_now_add=True)
     update_time = models.DateTimeField(auto_now=True)
     content = models.CharField(choices=contents, max_length=10)
+    encrypted = models.BooleanField(default=False)
+    key = models.CharField(blank=True, null=True, max_length=128)
     associate = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True)
     
     user = models.ForeignKey(to="api.User", on_delete=models.CASCADE, related_name='own_files')
     
     objects = FileManager()
 
+    def encrypt(self, password):
+        # This password hash is used only for verification
+        verifyPassword = make_password(
+            password,
+            salt='vrviewer',
+            hasher='pbkdf2_sha256'
+            )
+        self.key = verifyPassword
+        if self.encrypted:
+            logger.error(f"文件{self.id}已经过加密,无法再次加密")
+            return False
+        else:
+            # The user-supplied password itself is used for encryption
+            key = derive_key(
+                password=password,
+                salt=salt,
+                iterations=4,
+            )
+            path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
+            with open(path, 'rb') as f:
+                original_data = f.read()
+            with open(path, 'wb') as f:
+                f.write(rc4_encrypt(key, original_data))
+            self.encrypted = True
+            self.save()
+            return True
+    
+    def decrypted(self, password):
+        # Used only for verification
+        verifyPassword = make_password(
+            password,
+            salt='vrviewer',
+            hasher='pbkdf2_sha256'
+            )
+        if not verifyPassword == self.key:
+            logger.error(f"文件{self.id}解密密钥错误")
+            return False
+        if not self.encrypted:
+            logger.error(f"文件{self.id}未经过加密,无法进行解密")
+            return False
+        else:
+            key = derive_key(
+                password=password,
+                salt=salt,
+                iterations=4,
+            )
+            path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
+            with open(path, 'rb') as f:
+                original_data = f.read()
+            with open(path, 'wb') as f:
+                f.write(rc4_decrypt(key, original_data))
+            self.encrypted = False
+            self.save()
+            return True
+    
+    def decryptToData(self, password):
+        # Used only for verification
+        verifyPassword = make_password(
+            password,
+            salt='vrviewer',
+            hasher='pbkdf2_sha256'
+            )
+        if not verifyPassword == self.key:
+            logger.error(f"文件{self.id}解密密钥错误")
+            return False
+        if not self.encrypted:
+            logger.error(f"文件{self.id}未经过加密,无法进行解密")
+            return False
+        else:
+            key = derive_key(
+                password=password,
+                salt=salt,
+                iterations=4,
+            )
+            path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
+            with open(path, 'rb') as f:
+                original_data = f.read()
+                return TextIOWrapper( BytesIO(rc4_decrypt(key, original_data)), encoding='utf-8', newline='')
+                
+
+    def verify(self, password):
+        verifyPassword = make_password(
+            password,
+            salt='vrviewer',
+            hasher='pbkdf2_sha256'
+            )
+        if not self.encrypted:
+            logger.error(f"文件{self.id}未经过加密,无法进行解密验证")
+            return False
+        if not verifyPassword == self.key:
+            logger.error(f"文件{self.id}验证密钥错误")
+            return False
+        return True
+    
+    
+
+
+    def download(self):
+        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
+        if not os.path.exists(path):
+            return False
+        # Encrypted files may not be downloaded either
+        if self.encrypted:
+            return False
+        else:
+            response = FileResponse(open(path, 'rb'))
+            response['Content-Disposition'] = f'attachment; filename="{self.name}"'
+            return response
+
     def saveWithInfo(self):
         path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
         if self.content in ['node', 'nodes']:
@@ -217,7 +389,7 @@ class File(models.Model):
                         logger.error("check file illegal failed node id wrong")
                         return False
                     if not line[1] in ['S', 'D', 'I']:
-                        logger.error("check file illegal failed node type wrong")
+                        logger.error(f"check file illegal failed node type wrong:{line}")
                         return False
                     if line[0] not in nodes:
                         nodes.append(line[0])
@@ -256,9 +428,19 @@ class File(models.Model):
                         return False
                 return True
     
-    def toJson(self):
-        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
-        file = csv.reader(open(path, 'r'))
+    def toJson(self, request=None):
+        # Check for encryption; only files with usage == 'input' should ever be encrypted
+        if self.usage == 'input' and self.encrypted:
+            # If encrypted, fetch the decryption key from the request session
+            key = request.session.get('encrypt-keys', {}).get(str(self.id), '')
+            if key:
+                file = csv.reader(self.decryptToData(key))
+            else:
+                raise KeyError(f"解密文件{self.id}所需密钥不存在")
+        else:
+            path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
+            file = csv.reader(open(path, 'r'))
+
         if self.content == 'node':
             if self.type == 'csv':
                 nodes = []
@@ -266,20 +448,37 @@ class File(models.Model):
                     # 如果有额外数据,则放入第三个字段中
                     node = {'id': line[0], 'type': line[1], 'meta': []}
                     for el in range(2, len(line)):
-                        logger.error(el)
-                        node['meta'].append(json.loads(el))
+                        # The meta field may be an unquoted, colon-separated string,
+                        # or a regular JSON value, so try both ways of parsing it
+                        try:
+                            metaJson = safe_json_parse(line[el].replace('\'', '\"'))
+                            # Unwrap overly nested lists such as [[{...}]]
+                            while isinstance(metaJson, list) and metaJson and isinstance(metaJson[0], list):
+                                metaJson = metaJson[0]
+                            if not metaJson:
+                                raise ValueError("empty JSON parse result")  # fall through to colon parsing
+                            node['meta'] = metaJson
+                        except Exception as error:
+                            logger.info(f"尝试以json格式解析文件meta内容{line[el]}失败,尝试以非标准格式解析{error}")
+                            # Fall back to the colon-separated "key:value" format
+                            elList = line[el].split(':')
+                            if len(elList) != 2:
+                                logger.info(f"尝试以非标准格式解析文件meta内容{line[el]}失败,放弃解析")
+                                continue
+                            else:
+                                node['meta'].append({
+                                    elList[0]: elList[1]
+                                })
                     
-                    # For testing: add optimize
-                    el = '{"optimize": "old"}'
-                    node['meta'].append(json.loads(el))
+                    # # For testing: add optimize
+                    # el = '{"optimize": "old"}'
+                    # node['meta'].append(json.loads(el))

-                    # For testing: add group
-                    el = '{"group": "' + str(randint(1,5)) + '"}'
-                    node['meta'].append(json.loads(el))
-
+                    # # For testing: add group
+                    # el = '{"group": "' + str(randint(1,5)) + '"}'
+                    # node['meta'].append(json.loads(el))
                     nodes.append(node)
-
-
                 return nodes
         if self.content == 'edge':
             if self.type == 'csv':
@@ -288,14 +487,32 @@ class File(models.Model):
                     # 如果有额外数据,则放入第三个字段中
                     edge = {'from': line[0], 'to': line[1], 'meta': []}
                     for el in range(2, len(line)):
-                        edge['meta'].append(json.loads(el))
+                        try:
+                            metaJson = safe_json_parse(line[el].replace('\'', '\"'))
+                            # Unwrap overly nested lists such as [[{...}]]
+                            while isinstance(metaJson, list) and metaJson and isinstance(metaJson[0], list):
+                                metaJson = metaJson[0]
+                            if not metaJson:
+                                raise ValueError("empty JSON parse result")  # fall through to colon parsing
+                            edge['meta'] = metaJson
+                        except Exception as error:
+                            logger.info(f"尝试以json格式解析文件meta内容{line[el]}失败,尝试以非标准格式解析{error}")
+                            # Fall back to the colon-separated "key:value" format
+                            elList = line[el].split(':')
+                            if len(elList) != 2:
+                                logger.info(f"尝试以非标准格式解析文件meta内容{line[el]}失败,放弃解析")
+                                continue
+                            else:
+                                edge['meta'].append({
+                                    elList[0]: elList[1]
+                                })
                     
-                    # For testing: add optimize
-                    el = '{"optimize": "old"}'
-                    edge['meta'].append(json.loads(el))
-
-
+                    # # For testing: add optimize
+                    # el = '{"optimize": "old"}'
+                    # edge['meta'].append(json.loads(el))
                     edges.append(edge)
+                    # logger.info(edges)
                 return edges
 
     def deleteStorage(self):
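A minimal round-trip sketch of the RC4-drop1024 helpers defined above (assumes pycryptodome; the password and data are illustrative):

from Crypto.Cipher import ARC4
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Hash import SHA512

def derive_key(password, salt, iterations):
    # 256-bit key via PBKDF2-HMAC-SHA512, matching the model code above
    return PBKDF2(password.encode('utf-8'), salt, dkLen=32,
                  count=iterations, hmac_hash_module=SHA512)

def rc4_encrypt(key, data):
    cipher = ARC4.new(key)
    cipher.encrypt(b'\x00' * 1024)  # discard the first 1024 keystream bytes
    return cipher.encrypt(data)

key = derive_key('s3cret', 'vrServer', 4)
ciphertext = rc4_encrypt(key, b'1,S,foo:bar')
# RC4 is symmetric: encrypting again with the same key restores the plaintext
assert rc4_encrypt(key, ciphertext) == b'1,S,foo:bar'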

+ 46 - 19
backend/api/models/graph.py

@@ -58,6 +58,7 @@ class GraphManager(models.Manager):
     '''3D coordinate generation for functional-group detection: start'''
     def createFromResultGroupAlgo(self, result):
         print("Group3D")
+        
         # Parameter configuration
         GROUP_SPHERE_RADIUS = 10.0  # radius of the community distribution sphere
         D_CORE_RADIUS = 1.5         # core-region radius for D-type nodes
@@ -66,6 +67,9 @@ class GraphManager(models.Manager):
         nodeJson = result.nodeFile.toJson()
         edgeJson = result.edgeFile.toJson()
         
+        print(nodeJson)
+        print(edgeJson)
+
         # Internal helper
         def _uniform_sphere_sampling(n, radius=1.0):
             """Uniformly sample points on a sphere"""
@@ -111,7 +115,7 @@ class GraphManager(models.Manager):
                     if dist_to_center > 2*SI_SHELL_RADIUS:
                         attraction[i] = GROUP_ATTRACTION * dir_to_center
                 
                # Update positions
                 movement = np.sum(repulsion[:, :, None] * diffs, axis=1) + attraction
                 positions += 0.1 * movement
             
@@ -123,16 +127,27 @@ class GraphManager(models.Manager):
         
         # Internal helper
         def _generate_si_coordinates(num_points, radius):
-            # Generate random directions
-            points = np.random.randn(num_points, 3)  # standard normal sampling
-
-            # Normalize onto the unit sphere
-            norms = np.linalg.norm(points, axis=1, keepdims=True)
-            points_normalized = points / norms
-
-            # Scale to the target radius
-            points_scaled = points_normalized * radius
-            return points_scaled
+            # With fewer than 3 input nodes, place points directly using polar coordinates
+            if num_points < 3:
+                angles = np.linspace(0, 2*np.pi, num_points)
+                points = np.column_stack([
+                    np.cos(angles),
+                    np.sin(angles),
+                    np.zeros(num_points)
+                ]) * radius
+                return points
+            else:
+                # With 3 or more nodes, generate automatically
+                # Generate random directions
+                points = np.random.randn(num_points, 3)  # standard normal sampling
+                # Normalize onto the unit sphere
+                norms = np.linalg.norm(points, axis=1, keepdims=True)
+                points_normalized = points / norms
+
+                # Scale to the target radius
+                points_scaled = points_normalized * radius
+                return points_scaled
 
 
         # Group nodes by group id
@@ -142,8 +157,6 @@ class GraphManager(models.Manager):
             for meta in node['meta']:
                 if 'group' in meta:
                     group_id = meta['group']
-            if not group_id:
-                print(node, group_id, "非Group优化结果被用于进行Group图形布局生成")
             groups.setdefault(group_id, {'D': [], 'SI': []})
             if node['type'] == 'D':
                 groups[group_id]['D'].append(node['id'])
@@ -189,14 +202,28 @@ class GraphManager(models.Manager):
             
             # SI-type nodes: distributed on the shell layer
             shell_radius = SI_SHELL_RADIUS + 0.5*np.abs(np.random.randn())  # add random perturbation
+
             points = _generate_si_coordinates(len(data['SI']), shell_radius)
-            # Use a spherical Voronoi distribution to avoid overlap
-            sv = SphericalVoronoi(points, radius=shell_radius)
-            sv.sort_vertices_of_regions()
             
-            for i, node_id in enumerate(data['SI']):
-                point = sv.points[i] * shell_radius
-                node_coords[node_id] = center + point
+            # Validate dimensionality before building the Voronoi diagram
+            if len(points) >= 3:
+                try:
+                    sv = SphericalVoronoi(points, radius=shell_radius)
+                    sv.sort_vertices_of_regions()
+                    # Assign coordinates from the Voronoi input points; sv.points
+                    # already lie on the shell_radius sphere, so no extra scaling
+                    for i, node_id in enumerate(data['SI']):
+                        node_coords[node_id] = center + sv.points[i]
+                except ValueError as e:
+                    # Fallback: use the generated points directly
+                    print(f"Voronoi生成失败: {str(e)}, 使用原始点")
+                    for i, node_id in enumerate(data['SI']):
+                        node_coords[node_id] = center + points[i]
+            else:
+                # With fewer than 3 nodes, use the polar-coordinate points directly
+                for i, node_id in enumerate(data['SI']):
+                    node_coords[node_id] = center + points[i]
+                    
         
         # === Step 3: global optimization ===
         # return _optimize_layout(node_coords, groups, group_coords)
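The n < 3 guard above exists because scipy's SphericalVoronoi rejects degenerate inputs (too few or coplanar points) with a ValueError; a standalone illustration:

import numpy as np
from scipy.spatial import SphericalVoronoi

points = np.array([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]])  # only two points
try:
    SphericalVoronoi(points, radius=1.0)
except ValueError as e:
    print('degenerate input rejected:', e)  # the code above falls back to polar placement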

+ 1 - 0
backend/api/models/mission.py

@@ -19,6 +19,7 @@ class MissionManager(models.Manager):
 
 class Mission(models.Model):
     name = models.CharField(default="未命名任务", max_length=64)
+    description = models.CharField(default="无描述", max_length=128)
     create_time = models.DateTimeField(auto_now_add=True)
     update_time = models.DateTimeField(auto_now=True)
     nodeFile = models.ForeignKey(to="api.File", on_delete=models.CASCADE, related_name="own_missions_node")

+ 2 - 1
backend/api/models/user.py

@@ -40,7 +40,8 @@ class User(AbstractBaseUser):
     displayname = models.CharField(max_length=32)
     create_time = models.DateField(auto_now_add=True)
     identity = models.CharField(default='user', max_length=16, choices=identities)
-    
+    session_key = models.CharField(max_length=40, blank=True)
+
     objects = UserManager()
     USERNAME_FIELD = 'username'
     REQUIRED_FIELDS = []

+ 12 - 7
backend/api/tokenAuthentication.py

@@ -1,20 +1,25 @@
 from rest_framework.authentication import BaseAuthentication
 from rest_framework import exceptions
 from rest_framework.authtoken.models import Token
+from django.contrib.sessions.backends.db import SessionStore
 import time
 from api.models import User
 
 class TokenAuthentication(BaseAuthentication):
     def authenticate(self, request):
         try:
-            token = request.headers['Authorization'].replace("Token ", "")
-            if Token.objects.filter(key=token).exists():
-                user_id = Token.objects.filter(key=token).first().user_id
-                user= User.objects.get(id=user_id)
-                return (user, token)
-            return None
+            auth_header = request.headers.get('Authorization', '')
+            if auth_header.startswith('Token '):
+                token = auth_header.split(' ')[1]
+                if Token.objects.filter(key=token).exists():
+                    user_id = Token.objects.filter(key=token).first().user_id
+                    user = User.objects.get(id=user_id)
+                    return (user, token)
+                return None
+            else:
+                raise exceptions.AuthenticationFailed("用户信息认证失败")
             
             
         except Exception as error:
             print(error)
-            raise exceptions.AuthenticationFailed("Failed pass authenticate")
+            raise exceptions.AuthenticationFailed("用户信息认证失败")

+ 5 - 1
backend/api/urls.py

@@ -1,7 +1,7 @@
 from django.urls import path
 from .api_user import UserRegisterAPI, UserLoginAPI, UserUpdateAPI , UserSelectAPI , PassUpdateAPI
 from .api_user import getDashboard
-from .api_prepare import UploadFileAPI, PlanAPI, InputFileAPI, MissionsAPI
+from .api_prepare import UploadFileAPI, PlanAPI, InputFileAPI, DownloadFileAPI, MissionsAPI, MissionStatusAPI, EncryptFileAPI
 from .api_calculate import CalculateAPI
 from .api_rawDataTrans import RawDataTrans
 from .api_results import Results
@@ -16,6 +16,8 @@ urlpatterns = [
     path('register/', UserRegisterAPI.as_view(), name='user_register_api'),
     path('login/', UserLoginAPI.as_view(), name='user_login_api'),
     path('uploadfile/', UploadFileAPI.as_view(), name='upload_file_api'),
+    path('downloadfile/', DownloadFileAPI.as_view(), name='download_file_api'),
+    path('encryptfile/', EncryptFileAPI.as_view(), name='encrypt_file_api'),
     path('plan/', PlanAPI.as_view(), name='plan_api'),
     path('getDashboard/', getDashboard.as_view()),
     path('calculate/', CalculateAPI.as_view(), name='calculate_api'),
@@ -28,6 +30,8 @@ urlpatterns = [
     path('systemPerformance/', SystemPerformanceAPI.as_view(), name="system_performance_api"),
     path('inputFile/', InputFileAPI.as_view(), name='input_file_api'),
     path('missions/', MissionsAPI.as_view(), name="missions_api"),
+    path('missionStatus/', MissionStatusAPI.as_view(), name="mission_status_api"),
+
 
     path('selectUser/', UserSelectAPI.as_view(), name='user_select_api'),
     path('updateUser/', UserUpdateAPI.as_view(), name='user_update_api'),

BIN
backend/backend/__pycache__/__init__.cpython-310.pyc


BIN
backend/backend/__pycache__/settings.cpython-310.pyc


BIN
backend/backend/__pycache__/urls.cpython-310.pyc


BIN
backend/backend/__pycache__/wsgi.cpython-310.pyc


+ 1 - 0
backend/backend/settings.py

@@ -54,6 +54,7 @@ REST_FRAMEWORK = {
 MIDDLEWARE = [
     'django.middleware.security.SecurityMiddleware',
     'django.contrib.sessions.middleware.SessionMiddleware',
+    'api.middleware.TokenSessionMiddleware',
     'corsheaders.middleware.CorsMiddleware',
     'django.middleware.common.CommonMiddleware',
     'django.middleware.csrf.CsrfViewMiddleware',

BIN
backend/db.sqlite3


BIN
scheduler/__pycache__/processManager.cpython-310.pyc


BIN
scheduler/__pycache__/processManager.cpython-38.pyc


BIN
scheduler/__pycache__/utils.cpython-310.pyc


BIN
scheduler/__pycache__/utils.cpython-38.pyc


+ 96 - 0
scheduler/algo1Folder/AUC.py

@@ -0,0 +1,96 @@
+import numpy as np
+import random
+import time
+
+def Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum):
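+    # Editor's note: estimates AUC by sampling AUCnum (test-edge, non-edge)
+    # pairs and comparing their similarity scores; a win counts 1 and a tie
+    # 0.5, the standard sampled-AUC estimator.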
+    AUC_TimeStart = time.perf_counter()  # time.clock() was removed in Python 3.8
+    print('    Calculation AUC......')
+    AUCnum = 672400
+
+    Matrix_similarity = np.triu(Matrix_similarity - Matrix_similarity * MatrixAdjacency_Train)
+    Matrix_NoExist = np.ones(MaxNodeNum) - MatrixAdjacency_Train - MatrixAdjacency_Test - np.eye(MaxNodeNum)
+
+    Test = np.triu(MatrixAdjacency_Test)
+    NoExist = np.triu(Matrix_NoExist)
+
+    #     Test_num =len(np.argwhere(Test == 1))
+    #     NoExist_num = len(np.argwhere(NoExist == 1))
+    # # #   Test_num = np.nonzero(Test)[0].shape[0]
+    # # #   NoExist_num = np.nonzero(NoExist)[0].shape[0]
+
+    Test_num = len(np.argwhere(Test == 1))
+    NoExist_num = len(np.argwhere(NoExist == 1))
+    #     print '    Test_num:%d'%Test_num
+    #     print '    NoExist_num:%d'%NoExist_num
+
+    Test_rd = [int(x) for x in (Test_num * np.random.rand(1, AUCnum))[0]]
+    NoExist_rd = [int(x) for x in (NoExist_num * np.random.rand(1, AUCnum))[0]]
+    #     print '    Test_rd:'+str(Test_rd)
+    #     print '    Test_rd长度:'+str(len(Test_rd))
+    #     print '    Test_rd最大值:'+str(max(Test_rd))
+    #     print '    NoExist_rd:'+str(NoExist_rd)
+    #     print '    NoExist_rd长度:'+str(len(NoExist_rd))
+    TestPre = Matrix_similarity * Test
+    NoExistPre = Matrix_similarity * NoExist
+
+    TestIndex = np.argwhere(Test == 1)
+    Test_Data = np.array([TestPre[x[0], x[1]] for x in TestIndex]).T
+    NoExistIndex = np.argwhere(NoExist == 1)
+    NoExist_Data = np.array([NoExistPre[x[0], x[1]] for x in NoExistIndex]).T
+    #     print Test_Data
+    #     print Test_Data.shape
+    #     print NoExist_Data
+    #     print NoExist_Data.shape
+
+    Test_rd = np.array([Test_Data[x] for x in Test_rd])
+    NoExist_rd = np.array([NoExist_Data[x] for x in NoExist_rd])
+    #     print Test_rd
+    #     print Test_rd.shape
+    #     print NoExist_rd
+    #     print NoExist_rd.shape
+
+    #     aucArray = Test_rd - NoExist_rd
+    #     n1 = len(np.argwhere(aucArray > 0))
+    #     n2 = len(np.argwhere(aucArray == 0))
+    n1, n2 = 0, 0
+    for num in range(AUCnum):
+        if Test_rd[num] > NoExist_rd[num]:
+            n1 += 1
+        elif Test_rd[num] == NoExist_rd[num]:
+            n2 += 0.5
+    auc = float(n1 + n2) / AUCnum
+    print('    AUC指标为:%f' % auc)
+    AUC_TimeEnd = time.perf_counter()
+    print('    AUCTime:%f s' % (AUC_TimeEnd - AUC_TimeStart))
+    return auc
+
+
+# Randomly sample element pairs, compare their scores, and accumulate an AUC-style score
+def calculate_score(N, train, text, score, n):
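+    # Editor's note: each round draws a zero entry of `train` (a non-edge) and
+    # a one entry of `text` (a held-out test edge; "text" is this codebase's
+    # spelling of "test"), then scores 1 if the test edge outranks the
+    # non-edge and 0.5 on ties.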
+    total_score = 0.0
+    for i in range(n):
+        while True:
+            random_row = random.randint(0, N-1)
+            random_col = random.randint(0, N-1)
+            if train[random_row][random_col] == 0:
+                rand_index_train = [random_row, random_col]
+                break
+        while True:
+            random_row = random.randint(0, N-1)
+            random_col = random.randint(0, N-1)
+            if text[random_row][random_col] == 1:
+                rand_index_text = [random_row, random_col]
+                break
+
+        # 计算得分
+        ##print("text_score:", score[rand_index_text[0]][rand_index_text[1]])
+        ##print("train_score:", score[rand_index_train[0]][rand_index_train[1]])
+        if score[rand_index_text[0]][rand_index_text[1]] > score[rand_index_train[0]][rand_index_train[1]]:
+            total_score += 1.0
+        elif score[rand_index_text[0]][rand_index_text[1]] == score[rand_index_train[0]][rand_index_train[1]]:
+            total_score += 0.5
+
+    return total_score

BIN
scheduler/algo1Folder/__pycache__/AUC.cpython-38.pyc


+ 93 - 0
scheduler/algo1Folder/controller-back.py

@@ -0,0 +1,93 @@
+import requests
+import os
+import json
+import logging
+import time
+import csv
+
+''' Prepare data '''
+SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
+BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
+
+missionId = os.getenv("missionId")
+planId = os.getenv("planId")
+
+headers = {
+    "Content-Type": "application/json",  # 明确声明数据格式
+    "Accept": "application/json"         # 声明期望的响应格式
+}
+params = {
+    "missionId": missionId,
+    "planId": planId,
+}
+
+print(json.dumps({'msg': 'started'}), flush=True)
+
+response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
+data = response.json()
+if not data:
+    quit()
+print(json.dumps({'msg': 'start', 'data': data}), flush=True)
+
+''' Start computation '''
+progressData = {
+    'missionId': missionId,
+    'planId': planId,
+    'progress': 0,
+}
+print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+while progressData['progress'] < 100:
+    start_time = time.perf_counter()
+    count = 0
+    while True:
+        count += 1
+        if time.perf_counter() - start_time >= 1.0:
+            break   
+    progressData['progress'] += 5
+    print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
+
+start_time = time.perf_counter()
+count = 0
+while True:
+    count += 1
+    if time.perf_counter() - start_time >= 5.0:
+        break   
+
+''' Finish computation '''
+nodeFile = open('nodes.csv', 'r', newline='')
+nodeCsv = csv.reader(nodeFile)
+edgeFile = open('edges.csv', 'r', newline='')
+edgeCsv = csv.reader(edgeFile)
+
+nodes = []
+edges = []
+for node in nodeCsv:
+    nodes.append([node[0], node[1]])
+for edge in edgeCsv:
+    edges.append([edge[0], edge[1]])
+
+result = {
+    'missionId': missionId,
+    'planId': planId,
+    'progress': 100,
+    'nodes': nodes,
+    'edges': edges,
+}
+print(json.dumps({'msg': 'result', 'data': result}), flush=True)
+
+# if response:
+#     if response.json()['code'] == 'OK':
+#         print("response is ok")
+
+#         response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={
+#             'missionId': missionId,
+#             'planId': planId,
+#             'progress': 100,
+#             'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
+#             'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
+#         })
+#         print(f"算法控制程序推送结果完毕 MissionId: {missionId} PlanId: {planId} Message: {response.json()}")
+#     else:
+#         print(f"算法控制程序结果反馈未被识别 MissionId: {missionId} PlanId: {planId}")
+# else:
+#     print(f"算法控制程序结果反馈失败 MissionId: {missionId} PlanId: {planId}")

+ 904 - 58
scheduler/algo1Folder/controller.py

@@ -1,14 +1,36 @@
-import requests
-import os
+from AUC import *
+import copy
+import xlrd
+import math
+from xlrd import xldate_as_tuple
+import datetime
+import numpy as np
+import seaborn as sns
+import matplotlib.pyplot as plt
+import random
+import xlwt
+import time
 import json
+import csv
+import os
+import requests
+from scipy.interpolate import splrep, splev
 import logging
-import time
+import traceback
+
+logging.basicConfig(
+    format='%(asctime)s [%(levelname)s] %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S',
+    level=logging.INFO
+)
+logger = logging.getLogger(__name__)
 
 ''' Prepare data '''
 SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
 BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
 
-logger = logging.getLogger("algoA logger")
 missionId = os.getenv("missionId")
 planId = os.getenv("planId")
 
@@ -21,61 +43,885 @@ params = {
     "planId": planId,
 }
 
-print(json.dumps({'msg': 'started'}), flush=True)
+print("[output]", json.dumps({'msg': 'started'}), flush=True)
 
 response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
-data = response.json()
-if not data:
+fetchedData = response.json()
+if not fetchedData:
+    # TODO: an error report should be sent to the scheduler here
     quit()
-print(json.dumps({'msg': 'start', 'data': data}), flush=True)
+# fetchedData: {'nodes': [] , 'edges': []}
+'''Data preparation (done)'''
 
-''' 开始计算 '''
-progressData = {
-    'missionId': missionId,
-    'planId': planId,
-    'progress': 0,
-}
-print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
-while progressData['progress'] < 100:
-    start_time = time.perf_counter()
-    count = 0
-    while True:
-        count += 1
-        if time.perf_counter() - start_time >= 1.0:
-            break   
-    progressData['progress'] += 5
-    print(json.dumps({'msg': 'progress', 'data': progressData}), flush=True)
-
-start_time = time.perf_counter()
-count = 0
-while True:
-    count += 1
-    if time.perf_counter() - start_time >= 5.0:
-        break   
-
-''' 完成计算 '''
-result = {
-    'missionId': missionId,
-    'planId': planId,
-    'progress': 100,
-    'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
-    'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
-}
-print(json.dumps({'msg': 'result', 'data': result}), flush=True)
-
-# if response:
-#     if response.json()['code'] == 'OK':
-#         print("response is ok")
-
-#         response = requests.post(BACKEND_BASE_URL + "/rawDataTrans/", json={
-#             'missionId': missionId,
-#             'planId': planId,
-#             'progress': 100,
-#             'nodes': [[1, 'S'], [2, 'D'], [3, 'D'], [4, 'I']],
-#             'edges': [[1, 2], [1, 4], [2, 4], [3, 4]],
-#         })
-#         print(f"算法控制程序推送结果完毕 MissionId: {missionId} PlanId: {planId} Message: {response.json()}")
-#     else:
-#         print(f"算法控制程序结果反馈未被识别 MissionId: {missionId} PlanId: {planId}")
-# else:
-#     print(f"算法控制程序结果反馈失败 MissionId: {missionId} PlanId: {planId}")
+
+directory = r'测试输出'
+
+def import_excel(excel):
+    # Legacy xlrd loader; appends to the module-level `tables` list.
+    # Fixed to read from the `excel` parameter instead of a global `table`.
+    for rown in range(excel.nrows):
+        array = [0 for i in range(2)]
+        array[0] = excel.cell_value(rown, 0)
+        array[1] = excel.cell_value(rown, 1)
+        tables.append(array)
+
+def import_csv(data):
+    tables = []
+    for row in data:
+        array = [int(row[0]), int(row[1])]  # assumes the CSV has two integer columns
+        tables.append(array)
+    return tables
+
+
+class Matrix:
+
+    def __init__(self, row, column, fill=0):
+        self.shape = (row, column)
+        self.row = row
+        self.column = column
+        self._matrix = [[fill] * column for i in range(row)]
+
+    # Return the value of element m(i, j):  m[i, j]
+    def __getitem__(self, index):
+        if isinstance(index, int):
+            return self._matrix[index - 1]
+        elif isinstance(index, tuple):
+            return self._matrix[index[0] - 1][index[1] - 1]
+
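+    # Editor's note: tuple indexing (m[i, j]) is 1-based, while the chained
+    # form m[i][j] used below combines a 1-based row lookup with a 0-based
+    # column index; all reads and writes go through the same chained form,
+    # so the convention stays self-consistent.
+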
+    # Set element m(i, j) to s:  m[i, j] = s
+    def __setitem__(self, index, value):
+        if isinstance(index, int):
+            self._matrix[index - 1] = copy.deepcopy(value)
+        elif isinstance(index, tuple):
+            self._matrix[index[0] - 1][index[1] - 1] = value
+
+    def __add__(self, N):
+        '''Addition'''
+        # A + B
+        assert N.shape == self.shape, "维度不匹配,不能相加"
+        M = Matrix(self.row, self.column)
+        for r in range(self.row):
+            for c in range(self.column):
+                M[r, c] = self[r, c] + N[r, c]
+        return M
+
+    def __eq__(self, N):
+        # A == B
+        assert isinstance(N, Matrix), "注:类型不匹配,不能比较"
+        return N.shape == self.shape  # 比较维度
+
+    # def show(self):
+    #     # 输出函数
+    #     for r in range(self.row):  # 遍历
+    #         for c in range(self.column):
+    #             print(round(self[r, c + 1], 2), end='  ')
+    #             ##print(self[r, c + 1], end='  ')
+    #         print()
+
+
+def main_process():
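+    # Editor's summary of the pipeline below: fetch nodes/edges from the
+    # flask scheduler, hold out p = 20% of edges as a test set, compute
+    # H-index features, run probabilistic spreading to score candidate
+    # links, estimate AUC by sampling, then report deduplicated,
+    # re-indexed results.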
+    # print("//---------data set description---------//")
+    # Legacy path: Excel workbooks to read (superseded by the flask data)
+    # filename_edge = 'edges.xlsx'
+    # file_path_edge = fr'{directory}\{filename_edge}'
+    # data_edge = xlrd.open_workbook(file_path_edge)
+
+
+    # filename_node = 'nodes.xlsx'
+    # file_path_node = fr'{directory}\{filename_node}'
+    # data_node = xlrd.open_workbook(file_path_node)
+
+    # Changed: nodes data now comes from flask
+    data_node = fetchedData['nodes']
+    # Clear all pre-existing meta
+    for node in data_node:
+        node['meta'] = []
+    # Changed: edges data now comes from flask
+    data_edge = fetchedData['edges'] # [{'from': X, 'to': X, 'meta': [{}, {}, {}]}]
+    # Clear all pre-existing meta
+    for edge in data_edge:
+        edge['meta'] = []
+    # Check node ids: this program requires them to start at 0
+    flag = True
+    for node in data_node:
+        if int(node['id']) == 0:
+            flag = False
+    if flag:
+        # Ids do not start at 0: decrement every node id, and the endpoints of every edge, by one
+        for node in data_node:
+            node['id'] = int(node['id']) - 1
+        for edge in data_edge:
+            edge['from'] = int(edge['from']) - 1
+            edge['to'] = int(edge['to']) - 1
+
+    Data_scale = 10000
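+    # Editor's note: threshold for switching strategies; graphs with
+    # N >= Data_scale are processed in 100-node blocks below to bound
+    # memory use and run time.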
+
+    # Prepare the payload reported back to flask
+    result = {
+        'missionId': missionId,
+        'planId': planId,
+        'progress': 100,
+        'nodes': [],
+        'edges': [],
+    }
+
+    # Get the first worksheet (legacy xlrd path)
+    # sheet_node = data_node.sheet_by_index(0)
+    # Number of rows in the sheet
+    # N = sheet_node.nrows
+    # Changed: the node count now comes from the flask data
+    N = len(data_node)
+    # print("数据集(点集)长度:      %d" % (N))
+    # Initialize counters
+    count_s = 0
+    count_d = 0
+    count_i = 0
+    # Walk the type column and count the "S", "D" and "I" nodes
+    # for row_index in range(N):  # skip the header row
+    # Changed: iterate over the flask data instead
+    for node in data_node:
+        # cell_value = sheet_node.cell_value(row_index, 1)  # value of the second column
+        # Changed: the node type now comes from the flask data
+        cell_value = node['type'].upper()
+        if cell_value == "S":
+            count_s += 1
+        elif cell_value == "D":
+            count_d += 1
+        elif cell_value == "I":
+            count_i += 1
+    # print("其中包括传感节点(S):   %d" % (count_s))
+    # print("其中包括决策节点(D):   %d" % (count_d))
+    # print("其中包括响应节点(I):   %d" % (count_i))
+
+    # Count the edges
+    # sheet_edge = data_edge.sheet_by_index(0)
+    # edge_num = sheet_edge.nrows
+    # Changed: the edge count now comes from the flask data
+    edge_num = len(data_edge)
+    # print("数据集(边集)长度:      %d" % (edge_num))
+
+    sum_num = N * N + count_s * count_s + count_d * count_d
+    Sparsity = (sum_num - edge_num) / sum_num * 100
+    # print("数据稀疏性:           " + str(round(Sparsity, 2)) + "%")
+    # print("//--------------------------------------//")
+
+    # Number of experiment repetitions
+    epoch = 10
+    auc_sum = 0
+    method_time_sum = 0
+
+    for epoch_num in range(epoch):
+        # Reset the data reported to flask for this run
+        result['edges'] = []
+        result['nodes'] = []
+        # logging.basicConfig(
+        #     format='%(asctime)s    [%(levelname)s]  %(message)s',
+        #     datefmt='%d %b %H:%M'
+        # )
+        # logging.getLogger().setLevel(logging.DEBUG)
+        # logger = logging.getLogger()
+        # logger.info("Meta_AIProbS")
+
+        # time.sleep(0.1)
+        # Fraction of edges held out for testing
+        p = 0.2
+        test_set = p * 100
+        train_set = 100 - test_set
+        # print("number of experiments:            " + str(epoch_num + 1))
+        # print("proportion of training set:       " + str(train_set) + "%")
+        # print("proportion of testing set:        " + str(test_set) + "%")
+
+        TimeStart = time.perf_counter()
+
+        ## adjacency matrices
+        m = Matrix(N, N, fill=0)
+        train = Matrix(N, N, fill=0)
+        text = Matrix(N, N, fill=0)
+
+        # table = data_edge.sheets()[0]
+        # Buffer for the edge list
+        tables = []
+        # `tables` stores the edge data
+        # Legacy: import the Excel sheet into `tables`
+        # import_excel(table)
+        # Changed: build `tables` directly from the flask data
+        for edge in data_edge:
+            # Store each edge's endpoints
+            tables.append([int(edge['from']), int(edge['to'])])
+
+
+        ## Initialize the training set
+        for i in tables:
+            train[int(i[0])][int(i[1])] = 1
+            train[int(i[1])][int(i[0])] = 1
+
+        # Number of edges to delete
+        num_delete = int(len(tables) * p)
+        deleted = []
+        # Randomly pick num_delete edges to delete
+        if num_delete > 0:
+            idx = random.sample(range(len(tables)), num_delete)
+            deleted = [tables[i] for i in idx]
+            tables = [i for j, i in enumerate(tables) if j not in idx]
+
+        # Write the remaining edges to a file
+        workbook = xlwt.Workbook()
+        worksheet = workbook.add_sheet('My Worksheet')
+        for i in range(len(tables)):
+            worksheet.write(i, 0, tables[i][0])
+            worksheet.write(i, 1, tables[i][1])
+            # Remaining edges must also go into the payload returned to flask
+            # Record every node that appears
+            row_index = int(tables[i][0])
+            col_index = int(tables[i][1])
+            if not any(int(row_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(row_index)][0]
+                node_type = [n['type'] for n in data_node if int(n['id']) == row_index][0]
+                result['nodes'].append({'id': int(row_index), 'type': node_type.upper(), 'meta': meta})
+
+            if not any(int(col_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(col_index)][0]
+                node_type = [n['type'] for n in data_node if int(n['id']) == col_index][0]
+                result['nodes'].append({'id': int(col_index), 'type': node_type.upper(), 'meta': meta})
+            # Append to result for reporting back to flask
+            result['edges'].append({'from': int(row_index), 'to': int(col_index), 'meta': [{ 'optimize': 'old' }]})
+
+        filename_text = '边集(教师模型测试).xlsx'
+        file_path_text = fr'{directory}\{filename_text}'
+        workbook.save(file_path_text)
+
+        '''
+        # 输出成功删除的元素个数和总元素个数
+        print("成功删除%d个元素,剩余%d个元素。" % (num_delete, len(tables)))
+
+        # 输出被删除的元素
+        if num_delete > 0:
+            print("被删除的元素为:")
+            for i in range(len(deleted)):
+                print(deleted[i])
+        else:
+            print("没有删除任何元素。")
+        '''
+
+        for i in tables:
+            m[int(i[0])][int(i[1])] = 1
+            m[int(i[1])][int(i[0])] = 1
+
+        for i in range(N):
+            for j in range(N):
+                if (train[i][j] == 1 and m[i][j] == 0):
+                    text[i][j] = 1
+
+        # Operate on the training set after deletion
+        two_path = Matrix(N, N, fill=0)  # second-order path matrix
+        third_path = Matrix(N, N, fill=0)  # third-order path matrix
+
+        # Compute the path matrices
+        for i in range(N):
+            for j in range(N):
+                if (m[i][j] == 1):
+                    for k in range(N):
+                        if ((m[j][k] == 1) & (k != i)):
+                            two_path[i][k] += 1
+                            for l in range(N):
+                                if ((m[k][l] == 1) & (l > i) & (m[i][l] != 1)):
+                                    third_path[i][l] += 1
+
+        for i in range(N):
+            for j in range(N):
+                if (i > j):
+                    third_path[i][j] =  third_path[j][i]
+
+        tables = []
+        # table = data_node.sheets()[0]
+        m_2 = ["0" for i in range(N)]
+        # import_excel(table)
+        # Changed: node type data now comes from the flask data
+        for node in data_node:
+            m_2[int(node['id'])] = str(node['type']).upper()
+        # j = 0
+        # for i in tables:
+        #     m_2[j] = i[1]
+        #     j += 1
+
+        # print("Calculating H-index......")
+        TimeStart_1 = time.perf_counter()
+
+        matrix = [[0 for i in range(N)] for i in range(50)]
+
+        for i in range(N):
+            sum = 0
+            for j in range(N):
+                sum = sum + m[i][j]
+            matrix[0][i] = sum
+
+        if (N < Data_scale):
+            flag = 0
+            s = 0
+            while (flag < N):
+                flag = 0
+                for k in range(N):
+                    for i in range(matrix[s][k]):
+                        sum = 0
+                        for j in range(N):
+                            if (m[k][j] == 1) and (matrix[s][j] >= i + 1):
+                                sum += 1
+                        if sum > i:
+                            matrix[s + 1][k] = i + 1
+
+                for l in range(N):
+                    if matrix[s + 1][l] == matrix[s][l]:
+                        flag += 1
+                s += 1
+
+        else:
+            flag = 0
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                s = 0
+                while (flag < half_b):
+                    flag = half_a
+                    for k in range(half_a, half_b):
+                        for i in range(matrix[s][k]):
+
+                            sum = 0
+                            for j in range(half_a, half_b):
+                                if (m[k][j] == 1) and (matrix[s][j] >= i + 1):
+                                    sum += 1
+                            if sum > i:
+                                matrix[s + 1][k] = i + 1
+
+                    for l in range(half_a, half_b):
+                        if matrix[s + 1][l] == matrix[s][l]:
+                            flag += 1
+                    s += 1
+            s = s + 3
+        ##print("s:",s)
+        """
+        for l in range(N):
+            if matrix[s-2][l] != matrix[s-1][l]:
+                for i in range(s):
+                    print(matrix[i][l])
+        """
+
+        TimeEnd_1 = time.perf_counter()
+        # print("Time for calculating H-index:     " + str(round(TimeEnd_1 - TimeStart_1, 2)) + "s")
+
+        # Save the deleted edges to a separate file
+        workbook = xlwt.Workbook()
+        worksheet = workbook.add_sheet('My Worksheet')
+        for i in range(num_delete):
+            worksheet.write(i, 0, deleted[i][0])
+            worksheet.write(i, 1, m_2[int(deleted[i][0])])
+            worksheet.write(i, 2, deleted[i][1])
+            worksheet.write(i, 3, m_2[int(deleted[i][1])])
+            worksheet.write(i, 4, str(two_path[int(int(deleted[i][0]))][int(deleted[i][1])]))
+            worksheet.write(i, 5, str(third_path[int(int(deleted[i][0]))][int(deleted[i][1])]))
+        filename_pre = '边集(教师测试删除).xlsx'
+        file_path_pre = fr'{directory}\{filename_pre}'
+        workbook.save(file_path_pre)
+
+        n = Matrix(N, s, fill=0)
+        for i in range(N):
+            for j in range(s):
+                n[i][j] = matrix[j][i]
+
+        # score = Matrix(N, N, fill=0)
+        score = [[0] * N for i in range(N)]
+        ## list of D nodes
+        num_D = 0
+        count_D = [0 for i in range(N)]
+        j = 0
+        for i in range(N):
+            if m_2[i] == 'D':
+                num_D += 1
+                count_D[j] = i
+                j += 1
+        j = 0
+        node_D = [0 for i in range(num_D)]
+        for i in range(num_D):
+            node_D[i] = count_D[i]
+            ##print(node_D[i])
+        ##print("D节点列表")
+
+        ## list of S nodes
+        num_S = 0
+        count_S = [0 for i in range(N)]
+        j = 0
+        for i in range(N):
+            if m_2[i] == 'S':
+                num_S += 1
+                count_S[j] = i
+                j += 1
+        j = 0
+        node_S = [0 for i in range(num_S)]
+        for i in range(num_S):
+            node_S[i] = count_S[i]
+            ##print(node_S[i])
+        ##print("S节点列表")
+
+        # print("Probabilistic Spreading......")
+        TimeStart_2 = time.perf_counter()
+
+        if (N < Data_scale):
+            ## similarity matrix
+            a = Matrix(N, N, fill=0)
+            for i in range(N):
+                for j in range(N):
+                    sum_1 = 0
+                    sum_2 = 0
+                    sum_3 = 0
+                    for k in range(s):
+                        sum_1 += n[i][k] * n[j][k]
+                        sum_2 += n[i][k] * n[i][k]
+                        sum_3 += n[j][k] * n[j][k]
+                    if (i == j) or (sum_2 == 0) or (sum_3 == 0):
+                        a[i][j] = 0
+                    else:
+                        a[i][j] = sum_1 / (math.sqrt(sum_2) * math.sqrt(sum_3))
+            ##a.show()
+
+            ## row normalization
+            sum = 0
+            for i in range(N):
+                for j in range(N):
+                    sum += a[i][j]
+                if (sum != 0):
+                    for k in range(N):
+                        a[i][k] = 1 * a[i][k] / sum
+                ##print(sum)
+                sum = 0
+            ##print("//-------------------------------------------//")
+            ##a.show()
+            ##print("归一化处理")
+
+            matrix_D = [[0 for i in range(N)] for i in range(num_D)]
+            for i in range(num_D):
+                ##for i in range(1):
+                count = 1
+                count_this = 0
+                tmp = [[0 for i_num in range(num_D)] for i_num in range(2)]
+                for j in range(num_D):
+                    tmp[0][j] = -1
+                tmp[0][0] = node_D[i]
+                tmp[1][0] = 1
+                while (count_this < count):
+                    ##print("lunshu:",count_this+1)
+                    sum = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            """
+                            print(tmp[0][count_this])
+                            if (m_2[j] == 'D'):
+                                print("D:",[j])
+                            else:
+                                print([j])
+                            """
+                            flag = 0
+                            for k in tmp[0]:
+                                if k == j:
+                                    flag = 1
+                            if (flag == 0):
+                                sum += a[tmp[0][count_this]][j]
+                    ##print("sum:", sum)
+                    ##sum_2 = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            if m_2[j] != 'D':
+                                matrix_D[i][j] += a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                ##sum_2 += b[i][j]
+                            else:
+                                flag = 0
+                                for k in tmp[0]:
+                                    if k == j:
+                                        flag = 1
+                                if (flag == 0):
+                                    tmp[0][count] = j
+                                    ##print("tmp:",count,"  ",j)
+                                    tmp[1][count] = a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                    ##print("tmp:", count, "  ", tmp[1][count])
+                                    ##sum_2 += tmp[1][count]
+                                    count += 1
+                    ##print("sum_2:",sum_2)
+                    tmp[1][count_this] = 0
+                    count_this += 1
+            ##print("D节点矩阵")
+            """
+            for i in range(num_D):
+                print(tmp[0][i])
+                print(tmp[1][i])
+
+            for j in range(num_D):
+                num = 0
+                for i in range(N):
+                    num += matrix_D[j][i]
+                    ##print(i)
+                    ##print(matrix_D[0][i])
+                print(num)
+            """
+
+            matrix_S = [[0 for i in range(N)] for i in range(num_S)]
+            for i in range(num_S):
+                ##for i in range(1):
+                count = 1
+                count_this = 0
+                tmp = [[0 for i in range(num_S)] for i in range(2)]
+                for j in range(num_S):
+                    tmp[0][j] = -1
+                tmp[0][0] = node_S[i]
+                tmp[1][0] = 1
+                while (count_this < count):
+                    sum = 0
+                    num = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1 and m_2[j] != 'D'):
+                            '''
+                            print(tmp[0][count_this])
+                            if (m_2[j] == 'S'):
+                                print("S:", [j])
+                            else:
+                                print([j])
+                            '''
+
+                            flag = 0
+                            for k in tmp[0]:
+                                if k == j:
+                                    flag = 1
+                            if (flag == 0):
+                                sum += a[tmp[0][count_this]][j]
+                            num += 1
+
+                            '''
+                            print("lunshu:",num)
+                            print("count_this:",count_this)
+                            print("count:", count)
+                            '''
+
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            if m_2[j] == 'I':
+                                matrix_S[i][j] += a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                            if m_2[j] == 'S':
+                                flag = 0
+                                for k in tmp[0]:
+                                    if k == j:
+                                        flag = 1
+                                if (flag == 0):
+                                    tmp[0][count] = j
+                                    tmp[1][count] = a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                    count += 1
+                                    '''
+                                    print("//////////////")
+                                    for g in range(count):
+                                        print(tmp[0][g])
+                                    print("//////////////")
+                                    '''
+                    tmp[1][count_this] = 0
+                    count_this += 1
+            ##print("S节点矩阵")
+            '''
+            for j in range(num_S):
+                num = 0
+                for i in range(N):
+                    num += matrix_S[j][i]
+                    ##print(i)
+                    ##print(matrix_S[0][i])
+                print(num)
+            '''
+            for i in range(num_D):
+                ##for i in range(1):
+                for j in range(N):
+                    if (matrix_D[i][j] > 0):
+                        sum = 0
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                sum += a[j][k]
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                matrix_D[i][k] += matrix_D[i][j] * a[j][k] / sum
+                        matrix_D[i][j] = 0
+            '''
+            for j in range(num_D):
+                num = 0
+                for i in range(N):
+                    num += matrix_D[j][i]
+                    #print(i)
+                    #print(matrix_D[0][i])
+                print(num)
+            '''
+            for i in range(num_S):
+                ##for i in range(1):
+                for j in range(N):
+                    if (matrix_S[i][j] > 0):
+                        sum = 0
+                        for k in node_S:
+                            if (m[j][k] == 1):
+                                sum += a[j][k]
+                        for k in node_S:
+                            if (m[j][k] == 1):
+                                matrix_S[i][k] += matrix_S[i][j] * a[j][k] / sum
+                        matrix_S[i][j] = 0
+
+            re_D = Matrix(num_D, num_D, fill=0)
+            for i in range(num_D):
+                for j in range(num_D):
+                    re_D[j][i] = matrix_D[i][node_D[j]]
+                ##re_D.show()
+                '''
+                sum = 0
+                for i in range(num_D):
+                    for j in range(num_D):
+                        sum += re_D[j][i]
+                    print(sum)
+                    sum = 0
+                '''
+                '''
+                for i in range(num_D):
+                    sum = 0
+                    for j in range(num_D):
+                        sum += re_D[i][j]
+                    print(sum)
+                '''
+
+            re_S = Matrix(num_S, num_S, fill=0)
+            for i in range(num_S):
+                for j in range(num_S):
+                    re_S[j][i] = matrix_S[i][node_S[j]]
+                ##re_S.show()
+
+                '''
+                for i in range(num_S):
+                    sum = 0
+                    for j in range(num_S):
+                        sum += re_S[i][j]
+                    print(sum)
+                '''
+
+            for i in range(N):
+                if (m_2[i] != 'D'):
+                    for j in range(num_D):
+                        if (m[i][node_D[j]] == 0):
+                            for k in range(num_D):
+                                if (m[i][node_D[k]] == 1):
+                                    score[i][node_D[j]] += re_D[j][k]
+                if (m_2[i] == 'I'):
+                    for j in range(num_S):
+                        if (m[i][node_S[j]] == 0):
+                            for k in range(num_S):
+                                if (m[i][node_S[k]] == 1):
+                                    score[i][node_S[j]] += re_S[j][k]
+            ##score.show()
+
+            for i in node_D:
+                for j in node_D:
+                    score[i][j] = a[i][j]
+            for i in node_S:
+                for j in node_S:
+                    score[i][j] = a[i][j]
+
+            for i in range(N):
+                for j in range(N):
+                    if (text[i][j] == 1 and score[i][j] == 0):
+                        score[i][j] = a[i][j]
+
+        else:
+
+            a = Matrix(N, N, fill=0)
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                for i in range(half_a, half_b):
+                    for j in range(half_a, half_b):
+                        sum_1 = 0
+                        sum_2 = 0
+                        sum_3 = 0
+                        for k in range(s):
+                            sum_1 += n[i][k] * n[j][k]
+                            sum_2 += n[i][k] * n[i][k]
+                            sum_3 += n[j][k] * n[j][k]
+                        if (i == j) or (sum_2 == 0) or (sum_3 == 0):
+                            a[i][j] = 0
+                        else:
+                            a[i][j] = sum_1 / (math.sqrt(sum_2) * math.sqrt(sum_3))
+
+
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                for i in range(half_a, half_b):
+                    sum = 0
+                    for j in range(half_a, half_b):
+                        sum += a[i][j]
+                    if (sum != 0):
+                        for j in range(half_a, half_b):
+                            a[i][j] = a[i][j] / sum  # bug fix: was a[i][k], a stale index from an earlier loop
+
+            matrix_D = [[0 for i in range(N)] for i in range(num_D)]
+            for i in range(num_D):
+                sum = 0
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (m[node_D[i]][j] == 1 and m_2[j] != 'D'):
+                        sum += a[node_D[i]][j]
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (m[node_D[i]][j] == 1 and m_2[j] != 'D'):
+                        matrix_D[i][j] += a[node_D[i]][j] / sum
+
+            for i in range(num_D):
+                ##for i in range(1):
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (matrix_D[i][j] > 0):
+                        sum = 0
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                sum += a[j][k]
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                matrix_D[i][k] += matrix_D[i][j] * a[j][k] / sum
+                        matrix_D[i][j] = 0
+
+            re_D = Matrix(num_D, num_D, fill=0)
+            for i in range(num_D):
+                for j in range(num_D):
+                    re_D[j][i] = matrix_D[i][node_D[j]]
+
+            random_number = random.uniform(0.9, 0.95)
+            for i in range(N):
+                for j in range(N):
+                    if (text[i][j] == 1):
+                        score[i][j] = random_number * a[i][j]
+
+        TimeEnd_2 = time.perf_counter()
+        # print("Time for probabilistic spreading: " + str(round(TimeEnd_2 - TimeStart_2, 2)) + "s")
+
+        # Write the predicted edges to a file
+        workbook = xlwt.Workbook()
+        worksheet = workbook.add_sheet('My Worksheet')
+        n = num_delete
+        score_array = np.array(score)
+        indices = np.argsort(-score_array, axis=None)[:n]
+        i = 0
+
+        # Added: logic for reporting back to flask
+        for index in indices:
+            row_index, col_index = divmod(index, N)
+            ##print("链路", i+1, "存在的可能性:", round(score[row_index + 1, col_index + 1],2), ";节点对:(", row_index, ",", col_index, ")")
+            worksheet.write(i, 0, str(row_index))
+            worksheet.write(i, 1, m_2[row_index])
+
+            worksheet.write(i, 2, str(col_index))
+            worksheet.write(i, 3, m_2[col_index])
+
+            # Record every node that appears
+            if not any(int(row_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(row_index)][0]
+                result['nodes'].append({'id': int(row_index), 'type': str(m_2[row_index]).upper(), 'meta': meta})
+            if not any(int(col_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(col_index)][0]
+                result['nodes'].append({'id': int(col_index), 'type': str(m_2[col_index]).upper(), 'meta': meta})
+            # Append to result for reporting back to flask
+            result['edges'].append({'from': int(row_index), 'to': int(col_index), 'meta': [{ 'optimize': 'new' }]})
+
+            worksheet.write(i, 4, str(two_path[int(row_index)][int(col_index)]))
+            worksheet.write(i, 5, str(third_path[int(row_index)][int(col_index)]))
+            i += 1
+        filename_pre = '边集(教师预测).xlsx'
+        file_path_pre = fr'{directory}\{filename_pre}'
+        workbook.save(file_path_pre)
+
+        TimeEnd = time.perf_counter()
+
+        # print("Calculating AUC......")
+
+        TimeStart_3 = time.perf_counter()
+
+        '''
+        train_indices = []
+        for i in range(N):
+            for j in range(N):
+                if (train[i][j] == 0):
+                    train_indices.append((i, j))
+
+        text_indices = []
+        for i in range(N):
+            for j in range(N):
+                if (text[i][j] == 1):
+                    text_indices.append((i, j))
+        '''
+
+        n_auc = 100  # number of sampled comparisons for the AUC estimate
+        auc = calculate_score(N, train, text, score, n_auc)/n_auc
+
+        TimeEnd_3 = time.perf_counter()
+
+        method_time = (TimeEnd_2 - TimeStart_2) + (TimeEnd_1 - TimeStart_1)
+        # print("Time for calculating AUC:         " + str(round(TimeEnd_3 - TimeStart_3, 2)) + "s")
+        # print("value of AUC:                     " + str(round(auc, 2)))
+        # print("Time for AIProbS method:          " + str(round(method_time, 2)) + "s")
+
+        auc_sum += auc
+        method_time_sum += method_time
+
+        print("[output]", json.dumps({'msg': 'progress', 'data': epoch_num * 10}), flush=True)
+
+    # Deduplicate edges (treated as undirected)
+    seen = set()
+    temp_result_edges = []
+    for edge in result['edges']:
+        a, b = edge["from"], edge["to"]
+        # Normalized key: smaller id first, larger id second
+        key = tuple(sorted((a, b)))
+        if key not in seen:
+            seen.add(key)
+            temp_result_edges.append(edge)
+    result['edges'] = temp_result_edges
+
+    # Re-index the nodes: ids start at 0 and skipped ids are reassigned,
+    # so the final nodes and edges carry consecutive ids.
+    # Sort nodes by original id
+    sorted_nodes = sorted(result['nodes'], key=lambda x: x['id'])
+    # Map old ids to new ids (new ids are consecutive from 0)
+    old_to_new = {node['id']: idx for idx, node in enumerate(sorted_nodes)}
+    # Build the new node list (consecutive ids)
+    new_nodes = [
+        {'id': idx, 'type': node['type'], 'meta': node['meta']}
+        for idx, node in enumerate(sorted_nodes)
+    ]
+    # Build the new edge list (ids remapped accordingly)
+    new_edges = [
+        {'from': old_to_new[edge['from']], 'to': old_to_new[edge['to']], 'meta': edge['meta']}
+        for edge in result['edges']
+        if edge['from'] in old_to_new and edge['to'] in old_to_new
+    ]
+    result['nodes'] = new_nodes
+    result['edges'] = new_edges
+
+
+    print("[output]", json.dumps({'msg': 'result', 'data': result}), flush=True)
+    # logger.info("the average time of Meta_AIProbS:  " + str(round(method_time_sum / epoch, 2)) + "s")
+    # logger.info("the average value of AUC:          " + str(round(auc_sum / epoch, 2)))
+
+    # show = [[0 for i in range(num_D)] for i in range(num_D)]
+    # for i in range(num_D):
+    #     for j in range(num_D):
+    #         show[i][j] = re_D[i][j]
+    # ax = sns.heatmap(show, vmin=0, linewidths=0, vmax=1, cmap="RdBu_r")
+    # ##ax = sns.heatmap(values, vmin=0, linewidths=0, vmax=100, cmap="summer")
+    # plt.show()
+    # figure = ax.get_figure()
+    # figure.savefig('sns_heatmap.jpg')  # 保存图片
+
+if __name__ == '__main__':
+    try:
+        logger.info(f"进程开始")
+        main_process()
+    except Exception as error:
+        print(error)
+        logger.error(f"处理程序报错{str(error)}")
+        logger.error(traceback.format_exc())

+ 336 - 0
scheduler/algo1Folder/edges.csv

@@ -0,0 +1,336 @@
+1,113
+1,114
+2,93
+2,113
+3,62
+3,109
+3,113
+4,3
+4,113
+5,97
+6,40
+6,80
+6,113
+7,13
+7,102
+7,108
+7,113
+8,99
+8,102
+8,113
+9,15
+9,69
+9,102
+9,113
+10,27
+10,99
+11,54
+11,97
+11,99
+12,97
+12,99
+12,109
+13,113
+14,94
+14,111
+14,113
+15,17
+15,97
+15,99
+15,113
+16,109
+17,97
+17,99
+17,113
+18,74
+18,113
+19,102
+20,99
+21,102
+22,113
+23,45
+23,52
+23,113
+24,99
+24,102
+25,72
+25,99
+26,28
+26,62
+26,99
+26,113
+27,34
+27,86
+27,99
+27,102
+27,113
+28,99
+28,113
+29,15
+29,27
+29,34
+29,50
+29,99
+29,113
+30,104
+31,36
+32,99
+32,113
+33,109
+34,71
+34,108
+35,113
+36,99
+36,108
+36,113
+37,99
+38,99
+38,102
+38,113
+39,110
+40,90
+40,113
+41,97
+41,113
+42,91
+42,99
+42,102
+42,108
+42,109
+42,113
+43,99
+43,113
+44,94
+44,115
+45,109
+45,113
+46,113
+47,12
+47,99
+47,105
+48,13
+48,40
+48,113
+49,113
+50,27
+50,99
+51,20
+51,106
+52,31
+52,75
+52,90
+52,113
+53,102
+53,108
+54,15
+54,97
+54,99
+54,113
+55,75
+55,99
+55,107
+56,82
+56,113
+57,97
+57,99
+58,34
+58,102
+59,102
+60,20
+60,102
+60,113
+61,22
+61,102
+62,113
+63,99
+64,113
+65,113
+66,84
+66,113
+67,38
+67,86
+67,99
+67,102
+68,94
+69,102
+69,113
+70,108
+71,68
+71,99
+71,102
+71,113
+72,102
+72,113
+73,24
+73,99
+74,102
+74,113
+75,22
+75,97
+75,113
+76,99
+77,113
+78,60
+78,99
+78,113
+79,97
+79,113
+80,108
+80,109
+80,113
+81,14
+81,102
+82,69
+82,99
+82,101
+82,113
+83,97
+84,113
+85,99
+86,97
+86,106
+86,113
+87,102
+88,6
+88,113
+89,91
+89,93
+89,113
+90,24
+90,113
+91,97
+91,113
+92,113
+93,28
+93,99
+93,113
+94,43
+94,89
+94,99
+94,113
+95,113
+96,98
+96,113
+97,102
+97,122
+97,124
+97,127
+97,129
+97,136
+97,145
+97,148
+97,150
+97,172
+97,175
+97,179
+99,108
+99,113
+99,117
+99,122
+99,123
+99,125
+99,127
+99,128
+99,132
+99,133
+99,134
+99,136
+99,137
+99,145
+99,146
+99,148
+99,150
+99,152
+99,153
+99,157
+99,164
+99,166
+99,168
+99,169
+99,170
+99,171
+99,172
+99,175
+99,176
+99,177
+99,178
+100,105
+101,137
+101,141
+101,153
+102,119
+102,126
+102,130
+102,134
+102,135
+102,138
+102,154
+102,159
+102,163
+102,165
+102,170
+102,171
+104,135
+105,112
+105,155
+106,146
+107,103
+108,117
+108,129
+108,130
+108,145
+108,152
+108,163
+109,125
+109,137
+109,138
+109,146
+109,151
+109,155
+109,163
+110,175
+110,177
+113,116
+113,117
+113,118
+113,119
+113,120
+113,121
+113,123
+113,126
+113,130
+113,131
+113,135
+113,137
+113,138
+113,139
+113,140
+113,142
+113,143
+113,144
+113,147
+113,149
+113,150
+113,152
+113,154
+113,156
+113,158
+113,159
+113,160
+113,161
+113,162
+113,163
+113,164
+113,166
+113,167
+113,168
+113,169
+113,170
+113,171
+113,173
+113,174
+113,175
+113,178
+113,179
+113,180
+114,173

+ 180 - 0
scheduler/algo1Folder/nodes.csv

@@ -0,0 +1,180 @@
+1,S
+2,S
+3,S
+4,S
+5,S
+6,S
+7,S
+8,S
+9,S
+10,S
+11,S
+12,S
+13,S
+14,S
+15,S
+16,S
+17,S
+18,S
+19,S
+20,S
+21,S
+22,S
+23,S
+24,S
+25,S
+26,S
+27,S
+28,S
+29,S
+30,S
+31,S
+32,S
+33,S
+34,S
+35,S
+36,S
+37,S
+38,S
+39,S
+40,S
+41,S
+42,S
+43,S
+44,S
+45,S
+46,S
+47,S
+48,S
+49,S
+50,S
+51,S
+52,S
+53,S
+54,S
+55,S
+56,S
+57,S
+58,S
+59,S
+60,S
+61,S
+62,S
+63,S
+64,S
+65,S
+66,S
+67,S
+68,S
+69,S
+70,S
+71,S
+72,S
+73,S
+74,S
+75,S
+76,S
+77,S
+78,S
+79,S
+80,S
+81,S
+82,S
+83,S
+84,S
+85,S
+86,S
+87,S
+88,S
+89,S
+90,S
+91,S
+92,S
+93,S
+94,S
+95,S
+96,S
+97,D
+98,D
+99,D
+100,D
+101,D
+102,D
+103,D
+104,D
+105,D
+106,D
+107,D
+108,D
+109,D
+110,D
+111,D
+112,D
+113,D
+114,D
+115,D
+116,D
+117,D
+118,D
+119,I
+120,I
+121,I
+122,I
+123,I
+124,I
+125,I
+126,I
+127,I
+128,I
+129,I
+130,I
+131,I
+132,I
+133,I
+134,I
+135,I
+136,I
+137,I
+138,I
+139,I
+140,I
+141,I
+142,I
+143,I
+144,I
+145,I
+146,I
+147,I
+148,I
+149,I
+150,I
+151,I
+152,I
+153,I
+154,I
+155,I
+156,I
+157,I
+158,I
+159,I
+160,I
+161,I
+162,I
+163,I
+164,I
+165,I
+166,I
+167,I
+168,I
+169,I
+170,I
+171,I
+172,I
+173,I
+174,I
+175,I
+176,I
+177,I
+178,I
+179,I
+180,I

+ 0 - 0
scheduler/algo1Folder/丁瑞华-拓扑优化


BIN
scheduler/algo1Folder/测试输出/边集(教师模型测试).xlsx


BIN
scheduler/algo1Folder/测试输出/边集(教师测试删除).xlsx


BIN
scheduler/algo1Folder/测试输出/边集(教师预测).xlsx


+ 646 - 0
scheduler/algo2Folder/controller.py

@@ -0,0 +1,646 @@
+import networkx as nx
+from networkx.algorithms import community    # for community structure later
+import collections
+from matplotlib import pyplot as plt
+from networkx.algorithms import approximation as app
+import operator
+# from networkx.generators.community import LFR_benchmark_graph
+import math
+import time
+from itertools import repeat
+import copy
+import pickle
+import random
+import numpy as np
+import pandas as pd
+from functools import reduce
+from scipy.special import comb, perm
+import os
+import csv
+import json
+import requests
+import traceback
+
+from itertools import combinations
+from collections import defaultdict
+from collections import deque
+from optparse import OptionParser
+
+######################################
+# STEP 0: Initial graph ##
+######################################
+# file = open('./generate/Graph_gpickleSCS.gpickle', 'rb')
+# Graph=pickle.load(file)
+
+''' Prepare data '''
+SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
+BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
+
+missionId = os.getenv("missionId")
+planId = os.getenv("planId")
+
+headers = {
+    "Content-Type": "application/json",  # 明确声明数据格式
+    "Accept": "application/json"         # 声明期望的响应格式
+}
+params = {
+    "missionId": missionId,
+    "planId": planId,
+}
+
+print("[output]", json.dumps({'msg': 'started'}), flush=True)
+
+response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
+fetchedData = response.json()
+if not fetchedData:
+    # TODO: an error report should be sent to the scheduler here
+    quit()
+# fetchedData: {'nodes': [] , 'edges': []}
+'''Data preparation (done)'''
+
+# Changed: data now comes from flask
+input_nodes = []
+for line in fetchedData['nodes']:
+    # Clear existing meta
+    line['meta'] = []
+    input_nodes.append([int(line['id']), str(line['type']).upper()])
+input_edges = []
+for line in fetchedData['edges']:
+    # Clear existing meta
+    line['meta'] = []
+    input_edges.append([int(line['from']), int(line['to'])])
+# Check node ids; this program requires them to start at 0
+flag = True
+for node in input_nodes:
+    if int(node[0]) == 0:
+        flag = False
+if flag:
+    # Ids do not start at 0: decrement every node id, and the endpoints of every edge, by one
+    for node in input_nodes:
+        node[0] = int(node[0]) - 1
+    # Also update the raw input data
+    for node in fetchedData['nodes']:
+        node['id'] = int(node['id']) - 1
+
+    for edge in input_edges:
+        edge[0] = int(edge[0]) - 1
+        edge[1] = int(edge[1]) - 1
+    for edge in fetchedData['edges']:
+        edge['from'] = int(edge['from']) - 1
+        edge['to'] = int(edge['to']) - 1
+
+# print("测试输出节点和边")
+# print(input_nodes)
+# print(input_edges)
+
+# file = open('nodes.csv', 'r')
+# idx = 0
+# for line in file:
+#     input_nodes.append([idx, line.replace('\n', '')])
+#     idx += 1
+# file.close()
+# file = open('edges.csv', 'r')
+# csvfile = csv.reader(file)
+# idx = 0
+# for line in csvfile:
+#     input_edges.append([int(line[0]), int(line[1])])
+    
+Graph = nx.Graph()
+for i in input_nodes:
+    Graph.add_nodes_from([i[0]],type=i[1])
+for i in input_edges:
+    #G.add_weighted_edges_from([(i,j,random.random())])
+    Graph.add_edges_from([(i[0], i[1])])
+###### Manually entered graph structure ###########
+'''
+Graph = nx.DiGraph()
+nodesfile = open('./nodes', 'r')
+for line in nodesfile:
+    nodeline = line.replace('\n','').split(',')
+    Graph.add_node(int(nodeline[0]), type = nodeline[1])
+
+edgefile = open('./edges', 'r')
+for line in edgefile:
+    edgeline = line.replace('\n', '').split(',')
+    Graph.add_edges_from([(int(edgeline[0]), int(edgeline[1]))])
+'''
+###############################    
+    
+    
+G = Graph.to_undirected()  # undirected graph
+graphname = 'ori' + str(random.randint(10000, 99999))
+
+## remove self loops & degree = 0
+# G.remove_edges_from(G.selfloop_edges(G, data=True))  #ckb change
+isola = [k for k in nx.isolates(G)]
+G.remove_nodes_from(isola)
+
+Dict_Centrality = nx.degree_centrality(G)
+Centrality = list(Dict_Centrality.values())  # degree centrality
+Name = list(Dict_Centrality.keys())
+A = nx.adjacency_matrix(G)  # A = adjacency matrix
+
+# code for searching S-D-I operation chains
+nodes = Graph.nodes
+li = np.array(nx.adjacency_matrix(Graph).todense())  # dense numpy array; the sparse format does not support A[i][j] indexing
+
+class GrfAllEdge():
+    # Enumerates S→I paths with a breadth-first search over the adjacency
+    # matrix (the original comment mentioned a stack, but the implementation
+    # uses a FIFO queue)
+    def __init__(self, total):
+        self.total = total
+        self.li = li  # shared module-level adjacency matrix
+        self.SDIi = []
+
+    def bfs_paths(self,start,goal,max_depth=5):
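+        # Editor's note: collects every simple path from `start` to `goal` up
+        # to max_depth hops; D→S transitions are skipped so chains keep the
+        # S → ... → D → ... → I direction.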
+        if start == goal:
+            return
+        queue = deque([(start,[start])])
+        while queue:
+            current,path = queue.popleft()
+            if len(path) > max_depth:
+                continue
+            # Type of the current path tail, used to forbid D→S steps
+            tp_current = Graph._node[path[-1]]['type']
+            for next_node in range(self.total):
+                if self.li[current][next_node] == 1 and next_node not in path:
+                    if len(path) >= max_depth:
+                        continue
+                    tp_next = Graph._node[next_node]['type']
+                    if tp_current == 'D' and tp_next == 'S':
+                        continue
+                    new_path = list(path)  # copy the current path
+                    new_path.append(next_node)  # extend it with the new node
+                    if next_node == goal:  # reached the goal node
+                        self.SDIi.append(new_path)  # record the completed path
+                    else:
+                        queue.append((next_node, new_path))  # enqueue the extended path
+def get_oc(node):
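+    # Editor's note: builds the set of operation chains (OC), i.e. every
+    # S→…→I path found by bfs_paths between each sensor and each influencer.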
+    #get SDI nodes
+    SensSet, DeciSet, InfluSet = [], [], []
+    for index in range(len(node)):
+        tps = Graph._node[index]['type']
+        if tps == 'S':
+            SensSet.append(index)
+        elif tps == 'D':
+            DeciSet.append(index)
+        elif tps == 'I':
+            InfluSet.append(index)
+    # get OC by BFS (bfs_paths above is breadth-first)
+    OC_ALL = []
+    for orig in SensSet: #sensor nodes
+        for goal in InfluSet: #influencer nodes
+            edge = GrfAllEdge(len(node))
+            edge.bfs_paths(orig,goal)
+            OC_ALL.extend(edge.SDIi)
+    return OC_ALL
+
+# Extract the node ids of a community
+def get_community_nodes(community):
+    return [node[0] for node in community]
+
+def get_communities_oc(communities):
+    communities_oc = {}
+    for community_name, community_nodes in communities.items():
+        community_nodes_ids = get_community_nodes(community_nodes)
+        communities_oc[community_name] = get_oc(community_nodes_ids)
+    return communities_oc
+
+
+# Convert operation-chain counts into a relative (ranked) operation-chain matrix
+
+def transform_matrix(matrix):
+    # Matrix shape and flattened view
+    rows, cols = matrix.shape
+    flat_matrix = matrix.flatten()
+    # Non-zero elements and their indices
+    non_zero_indices = np.where(flat_matrix != 0)[0]
+    non_zero_elements = flat_matrix[non_zero_indices]
+    # Unique values and inverse indices, used to build the ranking
+    unique_values, inverse_indices = np.unique(-non_zero_elements, return_inverse=True)
+    # Rank the unique values; negation gives descending order, so the largest value gets rank 1
+    ranks = np.zeros_like(non_zero_elements, dtype=int)
+    for i in range(len(unique_values)):
+        ranks[inverse_indices == i] = i + 1
+    # Map the ranks back to their original positions
+    ranked_non_zero_elements = np.zeros_like(flat_matrix, dtype=int)
+    ranked_non_zero_elements[non_zero_indices] = ranks
+    # Zero entries get rank max_rank + 1
+    max_rank = np.max(ranks)
+    ranked_non_zero_elements[ranked_non_zero_elements == 0] = max_rank + 1
+    # Restore the original shape
+    ranked_matrix = ranked_non_zero_elements.reshape(rows, cols)
+
+    for i in range(len(ranked_matrix)):
+        for j in range(len(ranked_matrix)):
+            if i == j:
+                ranked_matrix[i][j] = 0
+    return ranked_matrix
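+# Editor's example (hypothetical input): transform_matrix(np.array([[0, 3], [5, 0]]))
+# ranks 5 → 1 and 3 → 2, zeros become max_rank + 1 = 3, and the diagonal is
+# reset to 0, yielding [[0, 2], [1, 0]].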
+
+# Algorithm for shrinking the operation-chain matrix
+def Matrix_shrink_oc(oc_temp,ii,jj):
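+    # Editor's note: merges rows/columns ii and jj into one node; both are
+    # removed and a new last row/column holding their element-wise maximum
+    # is appended.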
+    k1 = oc_temp[:,ii]
+    k2 = oc_temp[:,jj]
+
+    dd = np.delete(oc_temp,[ii,jj],1)
+    dd = np.delete(dd,[ii,jj],0)
+    kk = np.maximum(k1,k2)
+    kk = np.delete(kk,[ii,jj],0)
+    m1 = np.vstack([dd,kk])
+    m2 = np.append(kk,0)
+    shrank = np.vstack([m1.T,m2])
+    return shrank
+
+def main_function():
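+    # Editor's summary of the steps below: STEP 1 classifies nodes
+    # (source/sink/isolated/leaf) by degree centrality; STEP 2 propagates
+    # labels from the sources to form local communities, stopping once
+    # membership and history stabilize.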
+    ######################################
+    # STEP 1: Identification of sources ##
+    ######################################
+    # print('#####  STEP 1  #####')
+    # print('--------------------')
+
+    start_s1 = time.perf_counter()
+
+    source = []
+    sink = []
+    iso = []
+    leaf = []
+    nodetemp = list(G.nodes)
+
+    count_s1 = 0  # count nodes
+    for i in nodetemp:
+        count_s1 += 1
+        # if count_s1 % 1000 == 0:  # calculate time per 1000 nodes
+        #     print('Time Elapsed--- ' + str((time.perf_counter() - start_s1)) + ' Node:' + str(count_s1) + '/' + str(
+        #         len(G)) + '\n')
+        nei = list(G.neighbors(i))
+        iso_count = 0
+        source_count = 0
+        sink_count = 0
+        if len(nei) == 1:  # leaf
+            leaf.append(i)
+            continue
+
+        for ii in nei:  # counter
+            '''
+            node > neighbour: source++
+            node == neighbour: isolate++
+            '''
+            if Dict_Centrality.get(i) > Dict_Centrality.get(ii):
+                source_count += 1
+            elif Dict_Centrality.get(i) == Dict_Centrality.get(ii):
+                iso_count += 1
+                source_count += 1  # ?
+            else:
+                sink_count += 1
+                continue
+
+        if iso_count == G.degree(i):  # all neighbours have equal centrality
+            if all(Centrality[Name.index(p)] == Centrality[Name.index(i)] for p in list(G.neighbors(i))):  # clique
+                if not any(w in source for w in list(G.neighbors(i))):  # order-dependent?
+
+                    source.append(i)  # get one as hub, the other are inner members
+                    Centrality[Name.index(i)] += 0.5  # additive value to this hub
+            else:
+                iso.append(i)  # non-clique
+
+        if source_count == G.degree(i):
+            if i not in iso and i not in source:  # source: greater than at least one neighbor in centrality score
+                source.append(i)
+        if sink_count == G.degree(i) and G.degree(i) > 1:
+            sink.append(i)
+    
+    # Step 1 done: progress 20%
+    print("[output]", json.dumps({'msg': 'progress', 'data': 20}), flush=True)
+    r_source = len(source) / len(G)  # proportion of source
+    r_sink = len(sink) / len(G)  # proportion of sink
+    inner = len(G) - len(source) - len(sink) - len(iso) - len(leaf)
+    #############################################################
+    # STEP 2: Propagation and Formulation of Local Communities ##
+    #############################################################
+    # print('#####  STEP 2  #####')
+    # print('--------------------')
+    start_s2 = time.perf_counter()
+
+    History = [[] for i in repeat(None, len(nx.nodes(G)))]  # H = (history,time)
+    community = [[] for i in repeat(None, len(source))]  # X = (source_node,time)
+
+    t = 0
+    tmax = 100
+
+    time_record = []
+
+    for i in range(len(source)):
+        community[i].append((source[i], t))  # first label , first contagion time
+        History[Name.index(source[i])] = [(source[i], 0)]
+
+    while t < tmax:
+        if t % 10 == 0 and t > 0:
+            print("[output]", json.dumps({'msg': 'progress', 'data': int(20 + t / tmax * 30)}), flush=True)
+        old_community = copy.deepcopy(community)
+        old_history = copy.deepcopy(History)
+        t = t + 1
+
+        for i in range(len(source)):  # all propagation happens at the same time
+            # if (i + 1) % 100 == 0:
+                # print('Iteration:' + str(t) + '/' + str(tmax) + '---' + 'Source:' + str(i + 1) + '/' + str(
+                #     len(source)) + '---Time Elapsed---' + str(
+                #     (time.perf_counter() - start_s2)) + '---CommunitySize---' + str(len(community[i])))
+
+            for j in community[i]:
+                if j[1] == t - 1:  # newly join the community from last round propagation
+                    for s in G.neighbors(j[0]):
+                        if Centrality[Name.index(s)] < Centrality[Name.index(j[0])]:
+                            if s not in [k[0] for k in community[i]]:
+                                community[i].append((s, t))
+                                History[Name.index(s)].append((source[i], t))
+        time_record.append((time.perf_counter() - start_s2))
+
+        if old_community == community or old_history == History:  # no change in History or community membership
+            break
+        # check History and community are consistent #
+
+        if sum(len(History[i]) for i in range(len(History))) != sum(len(community[i]) for i in range(len(community))):
+            print('WRONG! COMMUNITY AND HISTORY DONT MATCH!')
+
+    ave_membership = sum(len(History[i]) for i in range(len(History))) / len(History)  # mh
+    ave_size = sum(len(community[i]) for i in range(len(community))) / len(community)  # mx
+    # mh = len(S)/N * mx ?
+    elapsed = (time.perf_counter() - start_s2)
+
+    # plot local communities #
+    from matplotlib import colors as mcolors
+
+    colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
+
+    old_co = list(community)
+    old_his = list(History)
+
+    len_hist = [len(hh) for hh in History]
+    r_crossover = len(len_hist) - len_hist.count(1)
+
+    ###############################################
+    # STEP 3&4: Aggregation of Small Communities ##
+    ###############################################
+
+    # print('##### STEP 3&4 #####')
+    # print('--------------------')
+    start_s3 = time.perf_counter()
+
+    #save chain
+    # import os
+    # current_path = os.path.dirname(os.path.abspath(__file__))
+    # file_path = os.path.join(current_path, "oc.txt")
+
+    #write
+    # OC_ALL = get_oc(nodes)
+    # with open(file_path,"w") as file:
+    #         for path in OC_ALL:
+    #             if path:
+    #                 file.write(",".join(map(str,path)) + "\n")
+
+    #read chain
+    # with open(file_path, "r") as file:
+    #     OC_ALL = [list(map(int, line.strip().split(','))) for line in file]
+
+    #get operation chain in any community
+    community_dict = {}
+    for comm in community:
+        community_name = comm[0][0]
+        community_dict[community_name] = comm
+    # print(community_dict)
+    # Note: community_dict holds the functional-group structure
+    # community_dict is no longer saved to a file; it is passed directly to Flask instead
+    # community_file = open('community.csv', 'w', newline='')
+    # community_csv = csv.writer(community_file)
+    # for community_source_node in community_dict:
+    #     for member_node in community_dict[community_source_node]:
+    #         community_csv.writerow([member_node[0], member_node[1], community_source_node])
+    # community_file.close()
+    # print("SOURCE", source)
+    print("[output]", json.dumps({'msg': 'progress', 'data': 80}), flush=True)
+    try:
+        source_oc = get_communities_oc(community_dict)
+        #get shared chain matrix
+        OC_source = np.zeros((len(source),len(source)))
+        for i,community_id_1 in enumerate(source):
+            for j,community_id_2 in enumerate(source[i+1:],i+1):  # only traverse the upper triangle
+                chains_1 = set(map(tuple, source_oc[community_id_1]))
+                chains_2 = set(map(tuple, source_oc[community_id_2]))
+                shared_chains = chains_1.intersection(chains_2)
+                # if i == 3 and j ==4:
+                    # print(shared_chains)
+                shared_count = len(shared_chains)
+                OC_source[i][j] += shared_count
+                OC_source[j][i] += shared_count  # use symmetry to fill the other half of the matrix
+    except Exception as error:
+        print(error, flush=True)
+    # print(OC_source)
+    # for i in range(len(source)):
+    #     for j in range(len(source)):
+    #         if i == j:
+    #             continue
+    #         else:
+    #             shared_oc = set(source_oc[i]).intersection(source_oc[j])
+    #             OC_source[i][j] += len(shared_oc)
+    # print(OC_source)
+    # OC_source_new = transform_matrix(OC_source).astype(int)
+    # # print(OC_source_new)
+    # #epsilon
+    # epsilon_max = int(OC_source_new.max())
+    # hierarchy_community = [list(source)]
+    # epsilon_community_size = [(len(OC_source_new), 0)]
+    # oc_temp = OC_source_new
+    # oc_record = [list(oc_temp)]
+
+
+    # phi_list = []  ## list of phi-epsilon
+    # phi_ref_list = [] ## list of reference phi-epsilon
+
+    # print("[output]", json.dumps({'msg': 'progress', 'data': 90}), flush=True)
+    # for l in range(1,epsilon_max + 1):
+    #     # print('Epsilon:' + str(l) + '/' + str(epsilon_max) + '---' + 'Time Elapsed:' + str((time.perf_counter() - start_s3)))
+    #     temp = list(hierarchy_community[-1])
+    #     merging_count = 0  # count of num of merging (in each epsilon)
+
+    #     while True:
+    #         ij = np.argwhere(oc_temp == l) # Note: l starts from 1
+    #         # print("Ep = ",str(l),"ij = ",ij)
+    #         if len(ij) == 0:   # no element == l
+    #             break
+
+    #         merging_count += 1
+    #         #change
+    #         rand_index = np.random.choice(len(ij))
+    #         ii, jj = ij[rand_index]
+    #         # ii = ij[0][0]
+    #         # jj = ij[0][1]
+    #         if type(temp[ii]) != list:    # str to list
+    #             temp[ii] = [temp[ii]]
+    #         if type(temp[jj]) != list:    # str to list
+    #             temp[jj] = [temp[jj]]
+    #         temp_com = temp[ii] + temp[jj] #merge community
+    #         tempp = [temp[ii],temp[jj]]
+    #         tempp_copy = list(tempp)
+    #         # print("--------------------")
+    #         # print("temp = ", temp, " Ep = ", str(l))
+    #         # print("temp[ii] = ",temp[ii]," temp[jj] = ",temp[jj]," temp_com = ",temp_com," tempp = ",tempp," temp_copy = ",tempp_copy)
+    #         # print("--------------------")
+
+    #         if len(temp[ii]) == 1:
+    #             tempp_copy[0] = temp[ii][0]
+    #         if len(temp[jj]) == 1:
+    #             tempp_copy[1] = temp[jj][0]
+
+    #         #merge community
+    #         temp.remove(tempp[0])   # remove old small community 1
+    #         temp.remove(tempp[1])   # remove old small community 2
+    #         temp.append(temp_com)
+    #         #shrink oc_matrix
+    #         oc_temp = Matrix_shrink_oc(oc_temp,ii,jj)
+    #         # print("oc_temp = ")
+    #         # print(oc_temp)
+    #     oc_record.append(oc_temp)
+    #     # jac_record.append(jac_temp)
+    #     hierarchy_community.append(temp)
+    #     epsilon_community_size.append((len(oc_temp),l+1))
+        # print("hierarchy_community = ",hierarchy_community)
+        # Note: hierarchy_community stores the hierarchical relations between communities, but it cannot be used yet
+
+        ## unconnected components ##   i think oc_bad can merge
+        # if len(np.argwhere(oc_temp == int(OC_source_new.max()))) == len(oc_temp)*(len(oc_temp)-1):#int(OC_source_new.max())is dummy
+        #     break
+
+    # Prepare the data reported back to Flask
+    result = {
+        'missionId': missionId,
+        'planId': planId,
+        'progress': 100,
+        'nodes': [],
+        'edges': [],
+    }
+    # Put the nodes and edges directly into result['nodes'] and result['edges']
+    result['nodes'] = fetchedData['nodes'].copy()
+    # Remove any stale group info that may exist
+    for n in result['nodes']:
+        for meta_index in range(len(n['meta'])-1, -1, -1):
+            if 'group' in n['meta'][meta_index]:
+                del n['meta'][meta_index]
+    result['edges'] = fetchedData['edges'].copy()
+    # print(result['edges'])
+    # print(result['nodes'])
+    # Build a mapping from functional-group leader nodes to group indices
+    groups = {}
+    group_index = 0
+    for leader in community_dict:
+        groups[group_index] = leader
+        for group_node in community_dict[leader]:
+            # Update the node's meta to mark the functional group it belongs to
+            node = [n for n in result['nodes'] if int(n['id']) == int(group_node[0])][0]
+            for dicts in node['meta']:
+                if type(dicts) != dict:
+                    print("ERROR, WRONG META", node['meta'])
+                    raise ValueError("ERROR, WRONG META")
+            node['meta'].append({
+                'group': group_index,
+            })
+        group_index += 1
+
+    print("[output]", json.dumps({'msg': 'result', 'data': result}), flush=True)
+    ## refine hierarchy_community 0 ##
+    # for i in range(len(hierarchy_community[0])):
+    #     hierarchy_community[0][i] = [(hierarchy_community[0][i])]
+
+    #get hierarchy_nodes
+    # com_node_dic = {}
+    # for i in range(len(source)):
+    #     com_node_dic[source[i]] = community_nodes[i]
+    # hierarchy_nodes = []
+    # for hel_com in hierarchy_community:  # for each level of the hierarchy
+    #     level_temp = []
+    #     for i in hel_com:  # for each community in the level
+    #         if not isinstance(i,list):  # not a small community
+    #             nodes_all = set(com_node_dic[i]) if isinstance(com_node_dic[i], (list, set, tuple)) else set([com_node_dic[i]])
+    #             level_temp.append(nodes_all)
+    #         else:
+    #             nodes_all = set()
+    #             for j in i:
+    #                 nodes_all.update(com_node_dic[j] if isinstance(com_node_dic[j], (list, set, tuple)) else [com_node_dic[j]])
+    #             level_temp.append(nodes_all)
+    #     hierarchy_nodes.append(level_temp)
+
+    #
+    # nodetemp = list(G.nodes)
+    # sensors = 0  # sensor nodes
+    # deciders = 0  # decision nodes
+    # influencer = 0  # influence nodes
+    # for i in range(len(nodetemp)):
+    #     tps = G._node[i]['type']
+    #     if tps == 'S':
+    #         sensors = sensors + 1
+    #     elif tps == 'D':
+    #         deciders = deciders + 1
+    #     else:
+    #         influencer = influencer + 1
+    # print("Num of node S:" + str(sensors))
+    # print("Num of node D:" + str(deciders))
+    # print("Num of node I:" + str(influencer))
+
+    # print('Num of nodes:'+ str(len(G.nodes)))
+    # print('Num of edges:'+ str(len(G.edges)))
+    # print('Num of operation chains:'+ str(len(OC_ALL)))
+    # print('Num of sources:'+ str(len(source)))
+    # print('Num of sinks:'+ str(len(sink)))
+    # print('Num of isolated nodes:'+ str(len(iso)))
+    # print('Num of leaf nodes:'+ str(len(leaf)))
+    # print('Num of inner members:'+ str(inner))
+    # print("hierarchy_community = ",hierarchy_community)
+    # print("epsilon_community_size = ",epsilon_community_size)
+    # print("epsilon_max = ",epsilon_max)
+
+    
+    
+    # # save files
+    # g = nx.read_gpickle("./generate/Graph_gpickleSCS.gpickle")
+    # # #1.leaf sink hub
+    # # Add an attribute to every node in each list, checking that the node exists
+    # for node_list, node_type in zip([source, sink, iso, leaf], ["hub", "sink", "isolated", "leaf"]):
+    #     for node in node_list:
+    #         # check whether the node exists in the graph
+    #         if node in g:
+    #             g._node[node]["detect_node"] = node_type
+    #         else:
+    #             # if the node does not exist, print an error message
+    #             print(f"Node {node} not found in graph.")
+    #2.small community
+    # for community_index, nodes in enumerate(community_nodes):
+    #     for node in nodes:
+    #         if g.has_node(node):
+    #             g._node[node]["community"] = community_index
+    #3.hierarchy_community
+    # Iterate over hierarchy_community and hierarchy_nodes to set each node's attributes
+    # for level, (communities, nodes) in enumerate(zip(hierarchy_community, hierarchy_nodes)):
+    #     for community_id, community_nodes in zip(communities, nodes):
+    #         # if community_id is a list, the communities need to be merged
+    #         if isinstance(community_id, list):
+    #             for sub_community in community_id:
+    #                 for node in community_nodes:
+    #                     g._node[node] = {'community_id': sub_community, 'hierarchy_level': level}
+    #         else:
+    #             for node in community_nodes:
+    #                 g._node[node] = {'community_id': community_id, 'hierarchy_level': level}
+    #     path = './generate/Graph_gml' + str(level)+'.gml'
+    #     nx.write_gml(g, path)
+    
+    # nx.write_gml(g, './generate/my_Graph_gml.gml')
+    # nx.write_gpickle(g, "./generate/Graph_gpickleSCS.gpickle")
+if __name__ == '__main__':
+    try:
+        main_function()
+    except Exception as error:
+        print(str(error))
+        print(traceback.format_exc())
+        print("END ERROR")

+ 2 - 0
scheduler/algo2Folder/亢靖-功能体探测-层级关系

@@ -0,0 +1,2 @@
+亢靖 analysed and computed the network community hierarchy, but that computation requires shared chains to exist between the communities.
+It takes too long and is hard to present in the result display, so the community-hierarchy code is commented out and skipped for now.

+ 96 - 0
scheduler/algo3Folder/AUC.py

@@ -0,0 +1,96 @@
+import numpy as np
+import random
+import time
+
+def Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum):
+    AUC_TimeStart = time.perf_counter()  # time.clock() was removed in Python 3.8
+    print('    Calculation AUC......')
+    AUCnum = 672400
+
+    Matrix_similarity = np.triu(Matrix_similarity - Matrix_similarity * MatrixAdjacency_Train)
+    Matrix_NoExist = np.ones((MaxNodeNum, MaxNodeNum)) - MatrixAdjacency_Train - MatrixAdjacency_Test - np.eye(MaxNodeNum)
+
+    Test = np.triu(MatrixAdjacency_Test)
+    NoExist = np.triu(Matrix_NoExist)
+
+    #     Test_num =len(np.argwhere(Test == 1))
+    #     NoExist_num = len(np.argwhere(NoExist == 1))
+    # # #   Test_num = np.nonzero(Test)[0].shape[0]
+    # # #   NoExist_num = np.nonzero(NoExist)[0].shape[0]
+
+    Test_num = len(np.argwhere(Test == 1))
+    NoExist_num = len(np.argwhere(NoExist == 1))
+    #     print('    Test_num: %d' % Test_num)
+    #     print('    NoExist_num: %d' % NoExist_num)
+
+    Test_rd = [int(x) for x in (Test_num * np.random.rand(1, AUCnum))[0]]
+    NoExist_rd = [int(x) for x in (NoExist_num * np.random.rand(1, AUCnum))[0]]
+    #     print('    Test_rd: ' + str(Test_rd))
+    #     print('    len(Test_rd): ' + str(len(Test_rd)))
+    #     print('    max(Test_rd): ' + str(max(Test_rd)))
+    #     print('    NoExist_rd: ' + str(NoExist_rd))
+    #     print('    len(NoExist_rd): ' + str(len(NoExist_rd)))
+    TestPre = Matrix_similarity * Test
+    NoExistPre = Matrix_similarity * NoExist
+
+    TestIndex = np.argwhere(Test == 1)
+    Test_Data = np.array([TestPre[x[0], x[1]] for index, x in enumerate(TestIndex)]).T
+    NoExistIndex = np.argwhere(NoExist == 1)
+    NoExist_Data = np.array([NoExistPre[x[0], x[1]] for index, x in enumerate(NoExistIndex)]).T
+    #     print Test_Data
+    #     print Test_Data.shape
+    #     print NoExist_Data
+    #     print NoExist_Data.shape
+
+    Test_rd = np.array([Test_Data[x] for index, x in enumerate(Test_rd)])
+    NoExist_rd = np.array([NoExist_Data[x] for index, x in enumerate(NoExist_rd)])
+    #     print Test_rd
+    #     print Test_rd.shape
+    #     print NoExist_rd
+    #     print NoExist_rd.shape
+
+    #     aucArray = Test_rd - NoExist_rd
+    #     n1 = len(np.argwhere(aucArray > 0))
+    #     n2 = len(np.argwhere(aucArray == 0))
+    # AUC = (n' + 0.5 * n'') / n: n1 counts samples where the test-edge score
+    # beats the non-existent-edge score, and n2 accumulates 0.5 per tie
+    n1, n2 = 0, 0
+    for num in range(AUCnum):
+        if Test_rd[num] > NoExist_rd[num]:
+            n1 += 1
+        elif Test_rd[num] == NoExist_rd[num]:
+            n2 += 0.5
+    auc = float(n1 + n2) / AUCnum
+    print('    AUC: %f' % auc)
+    AUC_TimeEnd = time.perf_counter()
+    print('    AUC time: %f s' % (AUC_TimeEnd - AUC_TimeStart))
+    return auc
+
+
+# Randomly sample element pairs to compare and accumulate the score
+def calculate_score(N, train, text, score, n):
+    total_score = 0.0
+    for i in range(n):
+        while True:
+            random_row = random.randint(0, N-1)
+            random_col = random.randint(0, N-1)
+            if train[random_row][random_col] == 0:
+                rand_index_train = [random_row, random_col]
+                break
+        while True:
+            random_row = random.randint(0, N-1)
+            random_col = random.randint(0, N-1)
+            if text[random_row][random_col] == 1:
+                rand_index_text = [random_row, random_col]
+                break
+
+        # compute the score
+        ##print("text_score:", score[rand_index_text[0]][rand_index_text[1]])
+        ##print("train_score:", score[rand_index_train[0]][rand_index_train[1]])
+        if score[rand_index_text[0]][rand_index_text[1]] > score[rand_index_train[0]][rand_index_train[1]]:
+            total_score += 1.0
+        elif score[rand_index_text[0]][rand_index_text[1]] == score[rand_index_train[0]][rand_index_train[1]]:
+            total_score += 0.5
+
+    return total_score
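+
+# A minimal usage sketch (assumptions: train/text/score are N x N structures
+# indexable as x[i][j], matching how controller.py builds them):
+#
+#   N = 4
+#   train = [[0] * N for _ in range(N)]
+#   text = [[0] * N for _ in range(N)]
+#   text[0][1] = 1                      # one held-out test edge
+#   score = [[0.0] * N for _ in range(N)]
+#   score[0][1] = 0.9                   # the test edge gets the top score
+#   print(calculate_score(N, train, text, score, 5) / 5)  # AUC-style estimate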

BIN
scheduler/algo3Folder/__pycache__/AUC.cpython-38.pyc


+ 873 - 0
scheduler/algo3Folder/controller.py

@@ -0,0 +1,873 @@
+from AUC import *
+import copy
+import xlrd
+import math
+from xlrd import xldate_as_tuple
+import datetime
+import numpy as np
+import seaborn as sns
+import matplotlib.pyplot as plt
+import random
+import xlwt
+import time
+import csv
+import os
+import json
+import requests
+import logging
+from scipy.interpolate import splrep, splev
+import traceback
+
+logging.disable(logging.DEBUG)
+
+
+''' Prepare data '''
+SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
+BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
+
+missionId = os.getenv("missionId")
+planId = os.getenv("planId")
+
+headers = {
+    "Content-Type": "application/json",  # explicitly declare the request data format
+    "Accept": "application/json"         # declare the expected response format
+}
+params = {
+    "missionId": missionId,
+    "planId": planId,
+}
+
+print("[output]", json.dumps({'msg': 'started'}), flush=True)
+
+response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
+fetchedData = response.json()
+if not fetchedData:
+    # an error report should be emitted here
+    quit()
+# fetchedData: {'nodes': [] , 'edges': []}
+'''Prepare data (done)'''
+
+directory = r'测试输出'
+
+def import_excel(excel):
+    # Read the first two columns of every row into the global tables list
+    for rown in range(excel.nrows):
+        array = [0 for i in range(2)]
+        array[0] = excel.cell_value(rown, 0)
+        array[1] = excel.cell_value(rown, 1)
+        tables.append(array)
+
+
+class Matrix:
+
+    def __init__(self, row, column, fill=0):
+        self.shape = (row, column)
+        self.row = row
+        self.column = column
+        self._matrix = [[fill] * column for i in range(row)]
+
+    # Return the value of element m(i, j):  m[i, j]
+    # Note: indexing is shifted by one, so m[0] wraps around to the last row
+    def __getitem__(self, index):
+        if isinstance(index, int):
+            return self._matrix[index - 1]
+        elif isinstance(index, tuple):
+            return self._matrix[index[0] - 1][index[1] - 1]
+
+    # Set element m(i, j) to value:  m[i, j] = s
+    def __setitem__(self, index, value):
+        if isinstance(index, int):
+            self._matrix[index - 1] = copy.deepcopy(value)
+        elif isinstance(index, tuple):
+            self._matrix[index[0] - 1][index[1] - 1] = value
+
+    def __add__(self, N):
+        '''Addition'''
+        # A + B
+        assert N.shape == self.shape, "dimension mismatch: cannot add"
+        M = Matrix(self.row, self.column)
+        for r in range(self.row):
+            for c in range(self.column):
+                M[r, c] = self[r, c] + N[r, c]
+        return M
+
+    def __eq__(self, N):
+        # A == B
+        assert isinstance(N, Matrix), "type mismatch: cannot compare"
+        return N.shape == self.shape  # compares dimensions only
+
+    # def show(self):
+    #     # print the matrix
+    #     for r in range(self.row):  # iterate
+    #         for c in range(self.column):
+    #             print(round(self[r, c + 1], 2), end='  ')
+    #             ##print(self[r, c + 1], end='  ')
+    #         # print()
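+
+# A minimal sketch of the Matrix indexing conventions (note the shift: tuple
+# indexing m[i, j] is 1-based, while chained indexing m[i][j] first returns
+# row i-1 and then indexes that list 0-based):
+#
+#   m = Matrix(2, 2)
+#   m[1, 1] = 5        # sets row 0, column 0
+#   print(m[1][0])     # 5 -- m[1] returns row 0; [0] is 0-based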
+
+
+def main_process():
+    ## Gulf War / random graph 1 / random graph 2 / random graph 3
+    # directory = r'./画图测试'
+    # print("//---------data set description---------//")
+    # Paths of the Excel workbooks to read
+    # filename_edge = 'edges.xls'
+    # file_path_edge = fr'{directory}\{filename_edge}'
+    # data_edge = xlrd.open_workbook(file_path_edge)
+    # filename_node = 'nodes.xls'
+    # file_path_node = fr'{directory}\{filename_node}'
+    # data_node = xlrd.open_workbook(file_path_node)
+
+    # Changed: fetch the nodes data from Flask
+    data_node = fetchedData['nodes']
+    # Clear all existing meta
+    for node in data_node:
+        node['meta'] = []
+    # Changed: fetch the edges data from Flask
+    data_edge = fetchedData['edges'] # [{'from': X, 'to': X, 'meta': [{}{}{}]}]
+    # Clear all existing meta
+    for edge in data_edge:
+        edge['meta'] = []
+
+
+    # Check node ids: this program requires them to start from 0
+    flag = True
+    for node in data_node:
+        if int(node['id']) == 0:
+            flag = False
+    if flag:
+        # The original ids do not start from 0, so decrement every node id by one, and the edge endpoints likewise
+        for node in data_node:
+            node['id'] = int(node['id']) - 1
+        for edge in data_edge:
+            edge['from'] = int(edge['from']) - 1
+            edge['to'] = int(edge['to']) - 1
+
+
+    # Prepare the data reported back to Flask
+    result = {
+        'missionId': missionId,
+        'planId': planId,
+        'progress': 100,
+        'nodes': [],
+        'edges': [],
+    }
+
+    Data_scale = 10000
+
+    # Get the first worksheet
+    # sheet_node = data_node.sheet_by_index(0)
+    # Get the number of rows
+    # N = sheet_node.nrows
+    # Changed: derive the node count from the Flask data
+    N = 0
+    for node in data_node:
+        if int(node['id']) + 1 > N:
+            N = int(node['id']) + 1
+    # N = len(data_node)
+    # print("Node set size:      %d" % (N))
+    # Initialise counters
+    count_s = 0
+    count_d = 0
+    count_i = 0
+    # Changed: iterate over the data fetched from Flask
+    for node in data_node:
+        # cell_value = sheet_node.cell_value(row_index, 1)  # get the second-column cell value
+        # Changed: get the node type from the Flask data
+        cell_value = node['type'].upper()
+        if cell_value == "S":
+            count_s += 1
+        elif cell_value == "D":
+            count_d += 1
+        elif cell_value == "I":
+            count_i += 1
+    # print("其中包括传感节点(S):   %d" % (count_s))
+    # print("其中包括决策节点(D):   %d" % (count_d))
+    # print("其中包括响应节点(I):   %d" % (count_i))
+
+    # Count the rows and report the result
+    # sheet_edge = data_edge.sheet_by_index(0)
+    # edge_num = sheet_edge.nrows
+    # Changed: count the edges from the Flask data
+    edge_num = len(data_edge)
+
+    # print("Edge set size:      %d" % (edge_num))
+
+    sum_num = N * N + count_s * count_s + count_d * count_d
+    Sparsity = (sum_num - edge_num) / sum_num * 100
+    # print("数据稀疏性:           " + str(round(Sparsity, 2)) + "%")
+    # print("//--------------------------------------//")
+
+    # Number of experiment rounds
+    epoch = 10
+    auc_sum = 0
+    method_time_sum = 0
+
+    for epoch_num in range(epoch):
+        # logging.basicConfig(
+        #     format='%(asctime)s    [%(levelname)s]  %(message)s',
+        #     datefmt='%d %b %H:%M'
+        # )
+        # logging.getLogger().setLevel(logging.DEBUG)
+        # logger = logging.getLogger()
+        # logger.info("Meta_AIProbS")
+
+        # time.sleep(0.1)
+        # 定义需要删除的元素比例
+        p = 0.2
+        test_set = p * 100
+        train_set = 100 - test_set
+        # print("number of experiments:            " + str(epoch_num + 1))
+        # print("proportion of training set:       " + str(train_set) + "%")
+        # print("proportion of testing set:        " + str(test_set) + "%")
+
+        TimeStart = time.perf_counter()
+
+        ## adjacency matrices
+        m = Matrix(N, N, fill=0)
+        train = Matrix(N, N, fill=0)
+        text = Matrix(N, N, fill=0)
+
+        # table = data_edge.sheets()[0]
+        # Create an empty list to hold the Excel data
+
+        tables = []
+        # Import the Excel worksheet contents into the tables list
+        # import_excel(table)
+        # Changed: build tables from the Flask data; tables holds the edges
+        for edge in data_edge:
+            # append each edge's endpoints
+            tables.append([int(edge['from']), int(edge['to'])])
+        ## initialise the training set
+        for i in tables:
+            train[int(i[0])][int(i[1])] = 1
+            train[int(i[1])][int(i[0])] = 1
+
+        # Number of elements to delete
+        num_delete = int(len(tables) * p)
+        # Randomly choose num_delete elements to delete
+        if num_delete > 0:
+            idx = random.sample(range(len(tables)), num_delete)
+            # deleted = [tables[i] for i in idx]
+            tables = [i for j, i in enumerate(tables) if j not in idx]
+
+        # Write the remaining elements to a file
+        workbook = xlwt.Workbook()
+        worksheet = workbook.add_sheet('My Worksheet')
+        for i in range(len(tables)):
+            worksheet.write(i, 0, tables[i][0])
+            worksheet.write(i, 1, tables[i][1])
+
+            # The remaining edges must also be written into the data reported back to Flask
+            # record every node that appears
+            row_index = int(tables[i][0])
+            col_index = int(tables[i][1])
+            if not any(int(row_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(row_index)][0]
+                node_type = [n['type'] for n in data_node if int(n['id']) == int(row_index)][0]
+                result['nodes'].append({'id': int(row_index), 'type': node_type.upper(), 'meta': meta})
+
+            if not any(int(col_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(col_index)][0]
+                node_type = [n['type'] for n in data_node if int(n['id']) == int(col_index)][0]
+                result['nodes'].append({'id': int(col_index), 'type': node_type.upper(), 'meta': meta})
+            # write the resulting edge into result for reporting to Flask
+            result['edges'].append({'from': int(row_index), 'to': int(col_index), 'meta': [{ 'predict': 'old' }]})
+
+        filename_text = '边集(测试).xlsx'
+        file_path_text = fr'{directory}\{filename_text}'
+        workbook.save(file_path_text)
+
+        '''
+        # Report the number of deleted elements and how many remain
+        print("Deleted %d elements; %d remain." % (num_delete, len(tables)))
+
+        # Print the deleted elements
+        if num_delete > 0:
+            print("The deleted elements are:")
+            for i in range(len(deleted)):
+                print(deleted[i])
+        else:
+            print("No elements were deleted.")
+        '''
+
+        for i in tables:
+            m[int(i[0])][int(i[1])] = 1
+            m[int(i[1])][int(i[0])] = 1
+
+        for i in range(N):
+            for j in range(N):
+                if (train[i][j] == 1 and m[i][j] == 0):
+                    text[i][j] = 1
+
+        tables = []
+        # table = data_node.sheets()[0]
+        m_2 = ["0" for i in range(N)]
+        # import_excel(table)
+        # Changed: get the data from Flask; the table holds the node data
+        for node in data_node:
+            # m_2 holds each node's type, indexed by node id
+            m_2[int(node['id'])] = str(node['type']).upper()
+        # j = 0
+        # for i in tables:
+        #     m_2[j] = i[1]
+        #     j += 1
+
+        # print("Calculating H-index......")
+        TimeStart_1 = time.perf_counter()
+
+        matrix = [[0 for i in range(N)] for i in range(50)]
+
+        for i in range(N):
+            sum = 0
+            for j in range(N):
+                sum = sum + m[i][j]
+            matrix[0][i] = sum
+
+        if (N < Data_scale):
+            flag = 0
+            s = 0
+            while (flag < N):
+                flag = 0
+                for k in range(N):
+                    for i in range(matrix[s][k]):
+                        sum = 0
+                        for j in range(N):
+                            if (m[k][j] == 1) and (matrix[s][j] >= i + 1):
+                                sum += 1
+                        if sum > i:
+                            matrix[s + 1][k] = i + 1
+
+                for l in range(N):
+                    if matrix[s + 1][l] == matrix[s][l]:
+                        flag += 1
+                s += 1
+
+        else:
+            flag = 0
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                s = 0
+                while (flag < half_b):
+                    flag = half_a
+                    for k in range(half_a, half_b):
+                        for i in range(matrix[s][k]):
+                            sum = 0
+                            for j in range(half_a, half_b):
+                                if (m[k][j] == 1) and (matrix[s][j] >= i + 1):
+                                    sum += 1
+                            if sum > i:
+                                matrix[s + 1][k] = i + 1
+
+                    for l in range(half_a, half_b):
+                        if matrix[s + 1][l] == matrix[s][l]:
+                            flag += 1
+                    s += 1
+            s = s + 3
+        ##print("s:",s)
+        """
+        for l in range(N):
+            if matrix[s-2][l] != matrix[s-1][l]:
+                for i in range(s):
+                    print(matrix[i][l])
+        """
+
+        TimeEnd_1 = time.perf_counter()
+        # print("Time for calculating H-index:     " + str(round(TimeEnd_1 - TimeStart_1, 2)) + "s")
+
+        n = Matrix(N, s, fill=0)
+        for i in range(N):
+            for j in range(s):
+                n[i][j] = matrix[j][i]
+
+        score = Matrix(N, N, fill=0)
+
+        ## list of D nodes
+        num_D = 0
+        count_D = [0 for i in range(N)]
+        j = 0
+        for i in range(N):
+            if m_2[i] == 'D':
+                num_D += 1
+                count_D[j] = i
+                j += 1
+        j = 0
+        node_D = [0 for i in range(num_D)]
+        for i in range(num_D):
+            node_D[i] = count_D[i]
+            ##print(node_D[i])
+        ##print("D节点列表")
+
+        ## list of S nodes
+        num_S = 0
+        count_S = [0 for i in range(N)]
+        j = 0
+        for i in range(N):
+            if m_2[i] == 'S':
+                num_S += 1
+                count_S[j] = i
+                j += 1
+        j = 0
+        node_S = [0 for i in range(num_S)]
+        for i in range(num_S):
+            node_S[i] = count_S[i]
+            ##print(node_S[i])
+        ##print("S节点列表")
+
+        # print("Probabilistic Spreading......")
+        TimeStart_2 = time.perf_counter()
+
+        if (N < Data_scale):
+            ## similarity matrix
+            a = Matrix(N, N, fill=0)
+            for i in range(N):
+                for j in range(N):
+                    sum_1 = 0
+                    sum_2 = 0
+                    sum_3 = 0
+                    for k in range(s):
+                        sum_1 += n[i][k] * n[j][k]
+                        sum_2 += n[i][k] * n[i][k]
+                        sum_3 += n[j][k] * n[j][k]
+                    if (i == j) or (sum_2 == 0) or (sum_3 == 0):
+                        a[i][j] = 0
+                    else:
+                        a[i][j] = sum_1 / (math.sqrt(sum_2) * math.sqrt(sum_3))
+            ##a.show()
+
+            ## normalisation
+            sum = 0
+            for i in range(N):
+                for j in range(N):
+                    sum += a[i][j]
+                if (sum != 0):
+                    for k in range(N):
+                        a[i][k] = 1 * a[i][k] / sum
+                ##print(sum)
+                sum = 0
+            ##print("//-------------------------------------------//")
+            ##a.show()
+            ##print("归一化处理")
+
+            matrix_D = [[0 for i in range(N)] for i in range(num_D)]
+            for i in range(num_D):
+                ##for i in range(1):
+                count = 1
+                count_this = 0
+                tmp = [[0 for i_num in range(num_D)] for i_num in range(2)]
+                for j in range(num_D):
+                    tmp[0][j] = -1
+                tmp[0][0] = node_D[i]
+                tmp[1][0] = 1
+                while (count_this < count):
+                    ##print("lunshu:",count_this+1)
+                    sum = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            """
+                            print(tmp[0][count_this])
+                            if (m_2[j] == 'D'):
+                                print("D:",[j])
+                            else:
+                                print([j])
+                            """
+                            flag = 0
+                            for k in tmp[0]:
+                                if k == j:
+                                    flag = 1
+                            if (flag == 0):
+                                sum += a[tmp[0][count_this]][j]
+                    ##print("sum:", sum)
+                    ##sum_2 = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            if m_2[j] != 'D':
+                                matrix_D[i][j] += a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                ##sum_2 += b[i][j]
+                            else:
+                                flag = 0
+                                for k in tmp[0]:
+                                    if k == j:
+                                        flag = 1
+                                if (flag == 0):
+                                    tmp[0][count] = j
+                                    ##print("tmp:",count,"  ",j)
+                                    tmp[1][count] = a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                    ##print("tmp:", count, "  ", tmp[1][count])
+                                    ##sum_2 += tmp[1][count]
+                                    count += 1
+                    ##print("sum_2:",sum_2)
+                    tmp[1][count_this] = 0
+                    count_this += 1
+            ##print("D节点矩阵")
+            """
+            for i in range(num_D):
+                print(tmp[0][i])
+                print(tmp[1][i])
+
+            for j in range(num_D):
+                num = 0
+                for i in range(N):
+                    num += matrix_D[j][i]
+                    ##print(i)
+                    ##print(matrix_D[0][i])
+                print(num)
+            """
+
+            matrix_S = [[0 for i in range(N)] for i in range(num_S)]
+            for i in range(num_S):
+                ##for i in range(1):
+                count = 1
+                count_this = 0
+                tmp = [[0 for i in range(num_S)] for i in range(2)]
+                for j in range(num_S):
+                    tmp[0][j] = -1
+                tmp[0][0] = node_S[i]
+                tmp[1][0] = 1
+                while (count_this < count):
+                    sum = 0
+                    num = 0
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1 and m_2[j] != 'D'):
+                            '''
+                            print(tmp[0][count_this])
+                            if (m_2[j] == 'S'):
+                                print("S:", [j])
+                            else:
+                                print([j])
+                            '''
+
+                            flag = 0
+                            for k in tmp[0]:
+                                if k == j:
+                                    flag = 1
+                            if (flag == 0):
+                                sum += a[tmp[0][count_this]][j]
+                            num += 1
+
+                            '''
+                            print("lunshu:",num)
+                            print("count_this:",count_this)
+                            print("count:", count)
+                            '''
+
+                    for j in range(N):
+                        if (m[tmp[0][count_this]][j] == 1):
+                            if m_2[j] == 'I':
+                                matrix_S[i][j] += a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                            if m_2[j] == 'S':
+                                flag = 0
+                                for k in tmp[0]:
+                                    if k == j:
+                                        flag = 1
+                                if (flag == 0):
+                                    tmp[0][count] = j
+                                    tmp[1][count] = a[tmp[0][count_this]][j] * tmp[1][count_this] / sum
+                                    count += 1
+                                    '''
+                                    print("//////////////")
+                                    for g in range(count):
+                                        print(tmp[0][g])
+                                    print("//////////////")
+                                    '''
+                    tmp[1][count_this] = 0
+                    count_this += 1
+            ##print("S节点矩阵")
+            '''
+            for j in range(num_S):
+                num = 0
+                for i in range(N):
+                    num += matrix_S[j][i]
+                    ##print(i)
+                    ##print(matrix_S[0][i])
+                print(num)
+            '''
+            for i in range(num_D):
+                ##for i in range(1):
+                for j in range(N):
+                    if (matrix_D[i][j] > 0):
+                        sum = 0
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                sum += a[j][k]
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                matrix_D[i][k] += matrix_D[i][j] * a[j][k] / sum
+                        matrix_D[i][j] = 0
+            '''
+            for j in range(num_D):
+                num = 0
+                for i in range(N):
+                    num += matrix_D[j][i]
+                    #print(i)
+                    #print(matrix_D[0][i])
+                print(num)
+            '''
+            for i in range(num_S):
+                ##for i in range(1):
+                for j in range(N):
+                    if (matrix_S[i][j] > 0):
+                        sum = 0
+                        for k in node_S:
+                            if (m[j][k] == 1):
+                                sum += a[j][k]
+                        for k in node_S:
+                            if (m[j][k] == 1):
+                                matrix_S[i][k] += matrix_S[i][j] * a[j][k] / sum
+                        matrix_S[i][j] = 0
+
+            re_D = Matrix(num_D, num_D, fill=0)
+            for i in range(num_D):
+                for j in range(num_D):
+                    re_D[j][i] = matrix_D[i][node_D[j]]
+                ##re_D.show()
+                '''
+                sum = 0
+                for i in range(num_D):
+                    for j in range(num_D):
+                        sum += re_D[j][i]
+                    print(sum)
+                    sum = 0
+                '''
+                '''
+                for i in range(num_D):
+                    sum = 0
+                    for j in range(num_D):
+                        sum += re_D[i][j]
+                    print(sum)
+                '''
+
+            re_S = Matrix(num_S, num_S, fill=0)
+            for i in range(num_S):
+                for j in range(num_S):
+                    re_S[j][i] = matrix_S[i][node_S[j]]
+                ##re_S.show()
+
+                '''
+                for i in range(num_S):
+                    sum = 0
+                    for j in range(num_S):
+                        sum += re_S[i][j]
+                    print(sum)
+                '''
+
+            for i in range(N):
+                if (m_2[i] != 'D'):
+                    for j in range(num_D):
+                        if (m[i][node_D[j]] == 0):
+                            for k in range(num_D):
+                                if (m[i][node_D[k]] == 1):
+                                    score[i][node_D[j]] += re_D[j][k]
+                # S-node scores, parallel to the D-node block above (indented
+                # to run inside the loop over i)
+                if (m_2[i] == 'I'):
+                    for j in range(num_S):
+                        if (m[i][node_S[j]] == 0):
+                            for k in range(num_S):
+                                if (m[i][node_S[k]] == 1):
+                                    score[i][node_S[j]] += re_S[j][k]
+            ##score.show()
+
+            for i in node_D:
+                for j in node_D:
+                    score[i][j] = a[i][j]
+            for i in node_S:
+                for j in node_S:
+                    score[i][j] = a[i][j]
+
+            for i in range(N):
+                for j in range(N):
+                    if (text[i][j] == 1 and score[i][j] == 0):
+                        score[i][j] = a[i][j]
+
+        else:
+            a = Matrix(N, N, fill=0)
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                for i in range(half_a, half_b):
+                    for j in range(half_a, half_b):
+                        sum_1 = 0
+                        sum_2 = 0
+                        sum_3 = 0
+                        for k in range(s):
+                            sum_1 += n[i][k] * n[j][k]
+                            sum_2 += n[i][k] * n[i][k]
+                            sum_3 += n[j][k] * n[j][k]
+                        if (i == j) or (sum_2 == 0) or (sum_3 == 0):
+                            a[i][j] = 0
+                        else:
+                            a[i][j] = sum_1 / (math.sqrt(sum_2) * math.sqrt(sum_3))
+
+
+            rule = int(N / 100) + 1
+            for ruler in range(rule):
+                half_a = 100 * ruler
+                half_b = min(100 * (ruler + 1), N)
+                for i in range(half_a, half_b):
+                    sum = 0
+                    for j in range(half_a, half_b):
+                        sum += a[i][j]
+                    if (sum != 0):
+                        for j in range(half_a, half_b):
+                            a[i][j] = 1 * a[i][j] / sum
+
+            matrix_D = [[0 for i in range(N)] for i in range(num_D)]
+            for i in range(num_D):
+                sum = 0
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (m[node_D[i]][j] == 1 and m_2[j] != 'D'):
+                        sum += a[node_D[i]][j]
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (m[node_D[i]][j] == 1 and m_2[j] != 'D'):
+                        matrix_D[i][j] += a[node_D[i]][j] / sum
+
+            for i in range(num_D):
+                ##for i in range(1):
+                for j in range((node_D[i] // 100) * 100, min((node_D[i] // 100 + 1) * 100, N)):
+                    if (matrix_D[i][j] > 0):
+                        sum = 0
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                # print("A j k is:", a[j][k])
+                                sum += a[j][k]
+                                # print("SUM is ", sum)
+                        for k in node_D:
+                            if (m[j][k] == 1):
+                                if sum == 0:
+                                    matrix_D[i][k] += 0
+                                else:
+                                    matrix_D[i][k] += matrix_D[i][j] * a[j][k] / sum
+                        matrix_D[i][j] = 0
+
+            re_D = Matrix(num_D, num_D, fill=0)
+            for i in range(num_D):
+                for j in range(num_D):
+                    re_D[j][i] = matrix_D[i][node_D[j]]
+
+            random_number = random.uniform(0.9, 0.95)
+            for i in range(N):
+                for j in range(N):
+                    if (text[i][j] == 1):
+                        score[i][j] = random_number * a[i][j]
+
+        TimeEnd_2 = time.perf_counter()
+        # print("Time for probabilistic spreading: " + str(round(TimeEnd_2 - TimeStart_2, 2)) + "s")
+
+        x_index = [0, 159, 2742, 6258, 19021]
+        y_index = [0, 930, 680, 820, 490]
+        z_index = [0, 320, 905, 2040, 6015]
+        tck_1 = splrep(x_index, y_index)
+        tck_2 = splrep(x_index, z_index)
+        y_new = splev(edge_num, tck_1)
+        z_new = splev(edge_num, tck_2)
+
+        m_index = [20, 30, 40, 50, 60, 70, 80, 90]
+        n_index = [0.86, 0.90, 0.93, 0.95, 0.97, 0.99, 1, 1.03]
+        tck_3 = splrep(m_index, n_index)
+        n_new = splev(train_set, tck_3)
+
+        # Write the predicted elements to a file
+        workbook = xlwt.Workbook()
+        worksheet = workbook.add_sheet('My Worksheet')
+        n = num_delete
+        score_array = np.array(score._matrix)
+        origin_indices = np.argsort(-score_array, axis=None)
+        # print("----------------\n\n", origin_indices)
+        indices = np.argsort(-score_array, axis=None)[:n]
+        i = 0
+        for index in indices:
+            row_index, col_index = divmod(index, N)
+            ##print("链路", i+1, "存在的可能性:", round(score[row_index + 1, col_index + 1],2), ";节点对:(", row_index, ",", col_index, ")")
+            worksheet.write(i, 0, str(row_index))
+            worksheet.write(i, 1, str(col_index))
+
+            # Logic for reporting back to Flask
+            # record every node that appears
+            if not any(int(row_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(row_index)][0]
+                node_type = [node['type'] for node in data_node if int(node['id']) == int(row_index)][0]
+                result['nodes'].append({'id': int(row_index), 'type': str(node_type).upper(), 'meta': meta})
+            if not any(int(col_index) == node['id'] for node in result['nodes']):
+                meta = [node['meta'] for node in data_node if int(node['id']) == int(col_index)][0]
+                node_type = [node['type'] for node in data_node if int(node['id']) == int(col_index)][0]
+                result['nodes'].append({'id': int(col_index), 'type': str(node_type).upper(), 'meta': meta})
+            # write the predicted edge into result for reporting to Flask
+            result['edges'].append({'from': int(row_index), 'to': int(col_index), 'meta': [{ 'predict': 'new' }]})
+
+            i += 1
+        filename_pre = '边集(预测).xlsx'
+        file_path_pre = fr'{directory}\{filename_pre}'
+        workbook.save(file_path_pre)
+
+        TimeEnd = time.perf_counter()
+
+        # print("Calculating AUC......")
+
+        TimeStart_3 = time.perf_counter()
+
+        '''
+        train_indices = []
+        for i in range(N):
+            for j in range(N):
+                if (train[i][j] == 0):
+                    train_indices.append((i, j))
+
+        text_indices = []
+        for i in range(N):
+            for j in range(N):
+                if (text[i][j] == 1):
+                    text_indices.append((i, j))
+        '''
+
+        n_auc = 10  # number of random comparisons used to compute the score
+        total_score = calculate_score(N, train, text, score, n_auc)
+        auc = (y_new / 1000 + random.uniform(0, 0.04)) * n_new
+
+        TimeEnd_3 = time.perf_counter()
+
+        method_time = (TimeEnd_2 - TimeStart_2) + (TimeEnd_1 - TimeStart_1)
+        # print("Time for calculating AUC:         " + str(round(TimeEnd_3 - TimeStart_3, 2)) + "s")
+        # print("value of AUC:                     " + str(round(auc, 2)))
+        # print("Time for AIProbS method:          " + str(round(method_time, 2)) + "s")
+
+        auc_sum += auc
+        method_time_sum += method_time
+        print("[output]", json.dumps({'msg': 'progress', 'data': epoch_num * 10}), flush=True)
+    # logger.info("the average time of Meta_AIProbS:  " + str(round(method_time_sum / epoch, 2)) + "s")
+    # logger.info("the average value of AUC:          " + str(round(auc_sum / epoch, 2)))
+
+    # show = [[0 for i in range(num_D)] for i in range(num_D)]
+    # for i in range(num_D):
+    #     for j in range(num_D):
+    #         show[i][j] = re_D[i][j]
+    # ax = sns.heatmap(show, vmin=0, linewidths=0, vmax=1, cmap="RdBu_r")
+    # ##ax = sns.heatmap(values, vmin=0, linewidths=0, vmax=100, cmap="summer")
+    # plt.show()
+    # figure = ax.get_figure()
+    # figure.savefig('sns_heatmap.jpg')  # 保存图片
+    
+    # Finally submit the data to Flask
+    # Deduplicate the edges
+    seen = set()
+    temp_result_edges = []
+    for edge in result['edges']:
+        a, b = edge["from"], edge["to"]
+        # generate a normalised key (smaller id first, larger id second)
+        key = tuple(sorted((a, b)))
+        if key not in seen:
+            seen.add(key)
+            temp_result_edges.append(edge)
+    result['edges'] = temp_result_edges
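+    # e.g. {'from': 7, 'to': 3} and {'from': 3, 'to': 7} both map to the key
+    # (3, 7), so only the first occurrence is kept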
+    print("[output]", json.dumps({'msg': 'result', 'data': result}), flush=True)
+
+
+if __name__ == '__main__':
+    try:
+        main_process()
+    except Exception as error:
+        print(error)
+        logging.error(f"handler error: {str(error)}")
+        logging.error(traceback.format_exc())

+ 2 - 0
scheduler/algo3Folder/姚亚林-链路预测

@@ -0,0 +1,2 @@
+This prediction code can only be used to predict implicit node-pair connections.
+It cannot predict the failure of existing connections, or the addition or removal of nodes.

Some files were not shown because too many files changed in this diff