# controller.py
import networkx as nx
from networkx.algorithms import community  # for community structure later
import collections
from matplotlib import pyplot as plt
from networkx.algorithms import approximation as app
import operator
# from networkx.generators.community import LFR_benchmark_graph
import math
import time
from itertools import repeat
import copy
import pickle
import random
import numpy as np
import pandas as pd
from functools import reduce
from scipy.special import comb, perm
import os
import csv
import json
import requests
import traceback
from itertools import combinations
from collections import defaultdict
from collections import deque
from optparse import OptionParser
######################################
# STEP 0: Initial graph ##
######################################
# file = open('./generate/Graph_gpickleSCS.gpickle', 'rb')
# Graph=pickle.load(file)
''' Prepare input data '''
SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
missionId = os.getenv("missionId")
planId = os.getenv("planId")
headers = {
    "Content-Type": "application/json",  # explicitly declare the request payload format
    "Accept": "application/json"  # declare the expected response format
}
params = {
    "missionId": missionId,
    "planId": planId,
}
print("[output]", json.dumps({'msg': 'started'}), flush=True)
response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
fetchedData = response.json()
if not fetchedData:
    # An error report should be sent here
    quit()
# fetchedData: {'nodes': [], 'edges': []}
''' Input data prepared '''
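# Assumed record shape, inferred from the field accesses below (illustrative, not a
# guaranteed schema): each node carries an id, a type letter, and a list of meta dicts;
# each edge carries its endpoints and a meta list.
#   nodes: [{'id': 1, 'type': 's', 'meta': [{'group': 0}, ...]}, ...]
#   edges: [{'from': 1, 'to': 2, 'meta': [...]}, ...]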
# Changed to fetch the data from the Flask service
input_nodes = []
for line in fetchedData['nodes']:
    # Clear any pre-existing meta entries that carry group labels
    for meta in range(len(line['meta']) - 1, -1, -1):
        if 'group' in line['meta'][meta]:
            del line['meta'][meta]
    input_nodes.append([int(line['id']), str(line['type']).upper()])
input_edges = []
for line in fetchedData['edges']:
    # Clear existing meta
    line['meta'] = []
    input_edges.append([int(line['from']), int(line['to'])])
# Check the node numbering; this program requires ids to start from 0
flag = True
for node in input_nodes:
    if int(node[0]) == 0:
        flag = False
if flag:
    # The original ids do not start at 0: decrement every node id, and the edge endpoints as well
    for node in input_nodes:
        node[0] = int(node[0]) - 1
    # Also update the original fetched data
    for node in fetchedData['nodes']:
        node['id'] = int(node['id']) - 1
    for edge in input_edges:
        edge[0] = int(edge[0]) - 1
        edge[1] = int(edge[1]) - 1
    for edge in fetchedData['edges']:
        edge['from'] = int(edge['from']) - 1
        edge['to'] = int(edge['to']) - 1
# print("Debug: dump the parsed nodes and edges")
# print(input_nodes)
# print(input_edges)
# file = open('nodes.csv', 'r')
# idx = 0
# for line in file:
#     input_nodes.append([idx, line.replace('\n', '')])
#     idx += 1
# file.close()
# file = open('edges.csv', 'r')
# csvfile = csv.reader(file)
# idx = 0
# for line in csvfile:
#     input_edges.append([int(line[0]), int(line[1])])
Graph = nx.Graph()
for i in input_nodes:
    Graph.add_nodes_from([i[0]], type=i[1])
for i in input_edges:
    # G.add_weighted_edges_from([(i, j, random.random())])
    Graph.add_edges_from([(i[0], i[1])])
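# Each node's 'type' attribute is expected to be one of 'S' (sensor), 'D' (decider)
# or 'I' (influencer); these labels drive the operation-chain search further below.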
###### Manually entered graph structure ###########
'''
Graph = nx.DiGraph()
nodesfile = open('./nodes', 'r')
for line in nodesfile:
    nodeline = line.replace('\n','').split(',')
    Graph.add_node(int(nodeline[0]), type = nodeline[1])
edgefile = open('./edges', 'r')
for line in edgefile:
    edgeline = line.replace('\n', '').split(',')
    Graph.add_edges_from([(int(edgeline[0]), int(edgeline[1]))])
'''
###############################
G = Graph.to_undirected()  # undirected view of the graph
graphname = 'ori' + str(random.randint(10000, 99999))
## remove self loops & degree = 0
# G.remove_edges_from(nx.selfloop_edges(G))  # ckb change
isola = [k for k in nx.isolates(G)]
G.remove_nodes_from(isola)
Dict_Centrality = nx.degree_centrality(G)
Centrality = list(Dict_Centrality.values())  # degree centrality scores
Name = list(Dict_Centrality.keys())
A = nx.adjacency_matrix(G)  # A = adjacency matrix
# code for searching SDI chains
nodes = Graph.nodes
li = np.array(nx.adjacency_matrix(Graph).todense())  # dense numpy array: the sparse format does not support li[i][j] indexing
class GrfAllEdge:
    # Breadth-first enumeration of all paths (up to max_depth) between two nodes,
    # pruning any D -> S transition; found paths accumulate in self.SDIi.
    def __init__(self, total):
        self.total = total
        self.li = li  # module-level dense adjacency matrix
        self.SDIi = []

    def bfs_paths(self, start, goal, max_depth=5):
        if start == goal:
            return
        queue = deque([(start, [start])])
        while queue:
            current, path = queue.popleft()
            if len(path) > max_depth:
                continue
            # Type of the last node on the path, used to forbid D -> S transitions
            tp_current = Graph.nodes[path[-1]]['type']
            for next_node in range(self.total):
                if self.li[current][next_node] == 1 and next_node not in path:
                    if len(path) >= max_depth:
                        continue
                    tp_next = Graph.nodes[next_node]['type']
                    if tp_current == 'D' and tp_next == 'S':
                        continue
                    new_path = list(path)  # copy the current path
                    new_path.append(next_node)  # extend it with the next node
                    if next_node == goal:  # reached the target node
                        self.SDIi.append(new_path)  # record a complete chain
                    else:
                        queue.append((next_node, new_path))  # enqueue the extended path
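# Illustrative sketch (assumed data, since the class reads the module-level Graph/li):
# if they describe a path graph 0(S) - 1(D) - 2(I), the search collects the single
# chain 0 -> 1 -> 2:
#   edge = GrfAllEdge(3)
#   edge.bfs_paths(0, 2)
#   edge.SDIi  # [[0, 1, 2]]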
def get_oc(node):
    # Collect S/D/I node ids from the given node list
    SensSet, DeciSet, InfluSet = [], [], []
    for index in node:  # fixed: iterate the node ids themselves, not positions 0..len(node)-1
        tps = Graph.nodes[index]['type']
        if tps == 'S':
            SensSet.append(index)
        elif tps == 'D':
            DeciSet.append(index)
        elif tps == 'I':
            InfluSet.append(index)
    # Enumerate operation chains (OC) by BFS from each sensor to each influencer
    OC_ALL = []
    for orig in SensSet:  # sensor nodes
        for goal in InfluSet:  # influencer nodes
            # search over the whole graph; the original passed len(node), which
            # truncated the search to low node ids when given a community subset
            edge = GrfAllEdge(len(li))
            edge.bfs_paths(orig, goal)
            OC_ALL.extend(edge.SDIi)
    return OC_ALL
# Extract node ids from a community (members are stored as (node_id, join_time) tuples)
def get_community_nodes(community):
    return [node[0] for node in community]


def get_communities_oc(communities):
    communities_oc = {}
    for community_name, community_nodes in communities.items():
        community_nodes_ids = get_community_nodes(community_nodes)
        communities_oc[community_name] = get_oc(community_nodes_ids)
    return communities_oc
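# Resulting shape: {community_leader_id: [[s, ..., i], ...]} - one list of
# operation chains per community, keyed by the community's source node.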
# Convert raw operation-chain counts into a relative rank matrix
def transform_matrix(matrix):
    # Matrix shape and flattened view
    rows, cols = matrix.shape
    flat_matrix = matrix.flatten()
    # Non-zero elements and their indices
    non_zero_indices = np.where(flat_matrix != 0)[0]
    non_zero_elements = flat_matrix[non_zero_indices]
    # Unique values and inverse indices, used to build the ranking;
    # negation yields a descending order (the largest count gets rank 1)
    unique_values, inverse_indices = np.unique(-non_zero_elements, return_inverse=True)
    ranks = np.zeros_like(non_zero_elements, dtype=int)
    for i in range(len(unique_values)):
        ranks[inverse_indices == i] = i + 1
    # Map the ranks back onto the original matrix positions
    ranked_non_zero_elements = np.zeros_like(flat_matrix, dtype=int)
    ranked_non_zero_elements[non_zero_indices] = ranks
    # Zero entries get the worst rank: max rank plus one
    max_rank = np.max(ranks)
    ranked_non_zero_elements[ranked_non_zero_elements == 0] = max_rank + 1
    # Restore the original shape and zero out the diagonal
    ranked_matrix = ranked_non_zero_elements.reshape(rows, cols)
    for i in range(len(ranked_matrix)):
        for j in range(len(ranked_matrix)):
            if i == j:
                ranked_matrix[i][j] = 0
    return ranked_matrix
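# Worked example (illustrative): for shared-chain counts
#   [[0, 5, 2],
#    [5, 0, 0],
#    [2, 0, 0]]
# count 5 gets rank 1, count 2 gets rank 2, zeros get 3 (max rank + 1), and the
# diagonal is forced to 0, giving
#   [[0, 1, 2],
#    [1, 0, 3],
#    [2, 3, 0]]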
# Shrink the operation-chain matrix: merge communities ii and jj into a single row/column
def Matrix_shrink_oc(oc_temp, ii, jj):
    k1 = oc_temp[:, ii]
    k2 = oc_temp[:, jj]
    dd = np.delete(oc_temp, [ii, jj], 1)
    dd = np.delete(dd, [ii, jj], 0)
    kk = np.maximum(k1, k2)  # the merged column keeps the element-wise maximum
    kk = np.delete(kk, [ii, jj], 0)
    m1 = np.vstack([dd, kk])
    m2 = np.append(kk, 0)
    shrank = np.vstack([m1.T, m2])
    return shrank
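# Illustrative example: merging indices 0 and 1 of
#   [[0, 1, 2], [1, 0, 3], [2, 3, 0]]
# keeps the element-wise maximum of their rows/columns, so
#   Matrix_shrink_oc(np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]]), 0, 1)
# returns [[0, 3], [3, 0]].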
def main_function():
    ######################################
    # STEP 1: Identification of sources ##
    ######################################
    # print('##### STEP 1 #####')
    # print('--------------------')
    start_s1 = time.perf_counter()
    source = []
    sink = []
    iso = []
    leaf = []
    nodetemp = list(G.nodes)
    count_s1 = 0  # count nodes
    for i in nodetemp:
        count_s1 += 1
        # if count_s1 % 1000 == 0:  # log elapsed time every 1000 nodes
        #     print('Time Elapsed--- ' + str((time.perf_counter() - start_s1)) + ' Node:' + str(count_s1) + '/' + str(
        #         len(G)) + '\n')
        nei = list(G.neighbors(i))
        iso_count = 0
        source_count = 0
        sink_count = 0
        if len(nei) == 1:  # leaf
            leaf.append(i)
            continue
        for ii in nei:  # compare centrality against each neighbour
            '''
            node > neighbour: source_count++
            node == neighbour: iso_count++
            '''
            if Dict_Centrality.get(i) > Dict_Centrality.get(ii):
                source_count += 1
            elif Dict_Centrality.get(i) == Dict_Centrality.get(ii):
                iso_count += 1
                source_count += 1  # ties count towards source as well (?)
            else:
                sink_count += 1
                continue
        if iso_count == G.degree(i):  # every neighbour ties on centrality
            if all(Centrality[Name.index(p)] == Centrality[Name.index(i)] for p in list(G.neighbors(i))):  # clique
                if not any(w in source for w in list(G.neighbors(i))):  # order-dependent?
                    source.append(i)  # take one as hub, the others are inner members
                    Centrality[Name.index(i)] += 0.5  # additive boost for this hub
            else:
                iso.append(i)  # non-clique
        if source_count == G.degree(i):
            if i not in iso and i not in source:  # source: greater than at least one neighbour in centrality score
                source.append(i)
        if sink_count == G.degree(i) and G.degree(i) > 1:  # fixed: 'and' instead of bitwise '&'
            sink.append(i)
    # Step 1 complete: progress 20%
    print("[output]", json.dumps({'msg': 'progress', 'data': 20}), flush=True)
    r_source = len(source) / len(G)  # proportion of sources
    r_sink = len(sink) / len(G)  # proportion of sinks
    inner = len(G) - len(source) - len(sink) - len(iso) - len(leaf)
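    # Classification summary (as implemented above):
    #   leaf   - degree-1 nodes
    #   source - degree centrality >= every neighbour (ties count towards source)
    #   iso    - ties with all neighbours, but equality was broken by an earlier hub boost
    #   sink   - degree centrality < every neighbour, with degree > 1
    #   inner  - every remaining node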
    #############################################################
    # STEP 2: Propagation and Formulation of Local Communities ##
    #############################################################
    # print('##### STEP 2 #####')
    # print('--------------------')
    start_s2 = time.perf_counter()
    History = [[] for i in repeat(None, len(nx.nodes(G)))]  # H = (history, time)
    community = [[] for i in repeat(None, len(source))]  # X = (source_node, time)
    t = 0
    tmax = 100
    time_record = []
    for i in range(len(source)):
        community[i].append((source[i], t))  # first label, first contagion time
        History[Name.index(source[i])] = [(source[i], 0)]
    while t < tmax:
        if t % 10 == 0 and t > 0:
            print("[output]", json.dumps({'msg': 'progress', 'data': int(20 + t / tmax * 30)}), flush=True)
        old_community = copy.deepcopy(community)
        old_history = copy.deepcopy(History)
        t = t + 1
        for i in range(len(source)):  # all propagation happens at the same time
            # if (i + 1) % 100 == 0:
            #     print('Iteration:' + str(t) + '/' + str(tmax) + '---' + 'Source:' + str(i + 1) + '/' + str(
            #         len(source)) + '---Time Elapsed---' + str(
            #         (time.perf_counter() - start_s2)) + '---CommunitySize---' + str(len(community[i])))
            for j in community[i]:
                if j[1] == t - 1:  # nodes that newly joined the community in the last round
                    for s in G.neighbors(j[0]):
                        if Centrality[Name.index(s)] < Centrality[Name.index(j[0])]:
                            if s not in [k[0] for k in community[i]]:
                                community[i].append((s, t))
                                History[Name.index(s)].append((source[i], t))
        time_record.append((time.perf_counter() - start_s2))
        if old_community == community or old_history == History:  # no change in History or community membership
            break
    # check that History and community stay consistent #
    if sum(len(History[i]) for i in range(len(History))) != sum(len(community[i]) for i in range(len(community))):
        print('WRONG! COMMUNITY AND HISTORY DONT MATCH!')
    ave_membership = sum(len(History[i]) for i in range(len(History))) / len(History)  # mh
    ave_size = sum(len(community[i]) for i in range(len(community))) / len(community)  # mx
    # mh = len(S)/N * mx ?
    elapsed = (time.perf_counter() - start_s2)
    # plot local communities #
    from matplotlib import colors as mcolors
    colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
    old_co = list(community)
    old_his = list(History)
    len_hist = [len(hh) for hh in History]
    r_crossover = len(len_hist) - len_hist.count(1)
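    # len_hist counts how many community labels each node accumulated; r_crossover
    # is the number of nodes whose history length differs from one, i.e. nodes
    # claimed by several communities (or, for unreached nodes, by none).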
    ###############################################
    # STEP 3&4: Aggregation of Small Communities ##
    ###############################################
    # print('##### STEP 3&4 #####')
    # print('--------------------')
    start_s3 = time.perf_counter()
    # save chains
    # import os
    # current_path = os.path.dirname(os.path.abspath(__file__))
    # file_path = os.path.join(current_path, "oc.txt")
    # write
    # OC_ALL = get_oc(nodes)
    # with open(file_path, "w") as file:
    #     for path in OC_ALL:
    #         if path:
    #             file.write(",".join(map(str, path)) + "\n")
    # read chains
    # with open(file_path, "r") as file:
    #     OC_ALL = [list(map(int, line.strip().split(','))) for line in file]
    # get the operation chains of every community
    community_dict = {}
    for comm in community:
        community_name = comm[0][0]
        community_dict[community_name] = comm
    # print(community_dict)
    # Note: community_dict holds the functional-unit structure
    # No longer saved to a file; passed directly back to the Flask service
    # community_file = open('community.csv', 'w', newline='')
    # community_csv = csv.writer(community_file)
    # for community_source_node in community_dict:
    #     for member_node in community_dict[community_source_node]:
    #         community_csv.writerow([member_node[0], member_node[1], community_source_node])
    # community_file.close()
    # print("SOURCE", source)
    print("[output]", json.dumps({'msg': 'progress', 'data': 80}), flush=True)
    try:
        source_oc = get_communities_oc(community_dict)
        # shared-chain matrix: entry (i, j) counts the chains common to communities i and j
        OC_source = np.zeros((len(source), len(source)))
        for i, community_id_1 in enumerate(source):
            for j, community_id_2 in enumerate(source[i + 1:], i + 1):  # only traverse the upper triangle
                chains_1 = set(map(tuple, source_oc[community_id_1]))
                chains_2 = set(map(tuple, source_oc[community_id_2]))
                shared_chains = chains_1.intersection(chains_2)
                # if i == 3 and j == 4:
                #     print(shared_chains)
                shared_count = len(shared_chains)
                OC_source[i][j] += shared_count
                OC_source[j][i] += shared_count  # mirror to the lower half by symmetry
    except Exception as error:
        print(error, flush=True)
    # print(OC_source)
    # for i in range(len(source)):
    #     for j in range(len(source)):
    #         if i == j:
    #             continue
    #         else:
    #             shared_oc = set(source_oc[i]).intersection(source_oc[j])
    #             OC_source[i][j] += len(shared_oc)
    # print(OC_source)
    # OC_source_new = transform_matrix(OC_source).astype(int)
    # # print(OC_source_new)
    # # epsilon
    # epsilon_max = int(OC_source_new.max())
    # hierarchy_community = [list(source)]
    # epsilon_community_size = [(len(OC_source_new), 0)]
    # oc_temp = OC_source_new
    # oc_record = [list(oc_temp)]
    # phi_list = []  # list of phi-epsilon
    # phi_ref_list = []  # list of reference phi-epsilon
    # print("[output]", json.dumps({'msg': 'progress', 'data': 90}), flush=True)
    # for l in range(1, epsilon_max + 1):
    #     # print('Epsilon:' + str(l) + '/' + str(epsilon_max) + '---' + 'Time Elapsed:' + str((time.perf_counter() - start_s3)))
    #     temp = list(hierarchy_community[-1])
    #     merging_count = 0  # number of merges in this epsilon round
    #     while True:
    #         ij = np.argwhere(oc_temp == l)  # Note: l starts from 1
    #         # print("Ep = ", str(l), "ij = ", ij)
    #         if len(ij) == 0:  # no element == l
    #             break
    #         merging_count += 1
    #         # change
    #         rand_index = np.random.choice(len(ij))
    #         ii, jj = ij[rand_index]
    #         # ii = ij[0][0]
    #         # jj = ij[0][1]
    #         if type(temp[ii]) != list:  # str to list
    #             temp[ii] = [temp[ii]]
    #         if type(temp[jj]) != list:  # str to list
    #             temp[jj] = [temp[jj]]
    #         temp_com = temp[ii] + temp[jj]  # merge the two communities
    #         tempp = [temp[ii], temp[jj]]
    #         tempp_copy = list(tempp)
    #         # print("--------------------")
    #         # print("temp = ", temp, " Ep = ", str(l))
    #         # print("temp[ii] = ", temp[ii], " temp[jj] = ", temp[jj], " temp_com = ", temp_com, " tempp = ", tempp, " temp_copy = ", tempp_copy)
    #         # print("--------------------")
    #         if len(temp[ii]) == 1:
    #             tempp_copy[0] = temp[ii][0]
    #         if len(temp[jj]) == 1:
    #             tempp_copy[1] = temp[jj][0]
    #         # merge communities
    #         temp.remove(tempp[0])  # remove old small community 1
    #         temp.remove(tempp[1])  # remove old small community 2
    #         temp.append(temp_com)
    #         # shrink the oc matrix
    #         oc_temp = Matrix_shrink_oc(oc_temp, ii, jj)
    #         # print("oc_temp = ")
    #         # print(oc_temp)
    #         oc_record.append(oc_temp)
    #         # jac_record.append(jac_temp)
    #     hierarchy_community.append(temp)
    #     epsilon_community_size.append((len(oc_temp), l + 1))
    #     # print("hierarchy_community = ", hierarchy_community)
    # Note: hierarchy_community stores the hierarchy between communities, but it is currently unused
    ## unconnected components ## i think oc_bad can merge
    #     if len(np.argwhere(oc_temp == int(OC_source_new.max()))) == len(oc_temp) * (len(oc_temp) - 1):  # int(OC_source_new.max()) is a dummy value
    #         break
    # Prepare the payload reported back to Flask
    result = {
        'missionId': missionId,
        'planId': planId,
        'progress': 100,
        'nodes': [],
        'edges': [],
    }
    # Copy the nodes and edges straight into the result
    result['nodes'] = fetchedData['nodes'].copy()
    # Remove any stale group info that may remain
    for n in result['nodes']:
        for meta_index in range(len(n['meta']) - 1, -1, -1):
            if 'group' in n['meta'][meta_index]:
                del n['meta'][meta_index]
    result['edges'] = fetchedData['edges'].copy()
    # print(result['edges'])
    # print(result['nodes'])
    # Map each functional-unit leader node to a group index
    groups = {}
    group_index = 0
    for leader in community_dict:
        groups[group_index] = leader
        for group_node in community_dict[leader]:
            # Tag the node's meta with the functional unit (group) it belongs to
            node = [n for n in result['nodes'] if int(n['id']) == int(group_node[0])][0]
            for dicts in node['meta']:
                if not isinstance(dicts, dict):
                    print("ERROR, WRONG META", node['meta'])
                    raise ValueError("ERROR, WRONG META")
            node['meta'].append({
                'group': group_index,
            })
        group_index += 1
    print("[output]", json.dumps({'msg': 'result', 'data': result}), flush=True)
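    # Emitted line, with illustrative values (the field names follow the code above;
    # the concrete node/edge contents depend on the fetched data):
    # [output] {"msg": "result", "data": {"missionId": "...", "planId": "...", "progress": 100,
    #           "nodes": [{"id": 0, "type": "S", "meta": [{"group": 0}]}, ...], "edges": [...]}}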
    ## refine hierarchy_community 0 ##
    # for i in range(len(hierarchy_community[0])):
    #     hierarchy_community[0][i] = [(hierarchy_community[0][i])]
    # get hierarchy_nodes
    # com_node_dic = {}
    # for i in range(len(source)):
    #     com_node_dic[source[i]] = community_nodes[i]
    # hierarchy_nodes = []
    # for hel_com in hierarchy_community:  # for each level of the hierarchy
    #     level_temp = []
    #     for i in hel_com:  # for each community in this level
    #         if not isinstance(i, list):  # not a merged community
    #             nodes_all = set(com_node_dic[i]) if isinstance(com_node_dic[i], (list, set, tuple)) else set([com_node_dic[i]])
    #             level_temp.append(nodes_all)
    #         else:
    #             nodes_all = set()
    #             for j in i:
    #                 nodes_all.update(com_node_dic[j] if isinstance(com_node_dic[j], (list, set, tuple)) else [com_node_dic[j]])
    #             level_temp.append(nodes_all)
    #     hierarchy_nodes.append(level_temp)
    #
    # nodetemp = list(G.nodes)
    # sensors = 0     # sensors
    # deciders = 0    # deciders
    # influencer = 0  # influencers
    # for i in range(len(nodetemp)):
    #     tps = G._node[i]['type']
    #     if tps == 'S':
    #         sensors = sensors + 1
    #     elif tps == 'D':
    #         deciders = deciders + 1
    #     else:
    #         influencer = influencer + 1
    # print("Num of node S:" + str(sensors))
    # print("Num of node D:" + str(deciders))
    # print("Num of node I:" + str(influencer))
    # print('Num of nodes:' + str(len(G.nodes)))
    # print('Num of edges:' + str(len(G.edges)))
    # print('Num of operation chains:' + str(len(OC_ALL)))
    # print('Num of sources:' + str(len(source)))
    # print('Num of sinks:' + str(len(sink)))
    # print('Num of isolated nodes:' + str(len(iso)))
    # print('Num of leaf nodes:' + str(len(leaf)))
    # print('Num of inner members:' + str(inner))
    # print("hierarchy_community = ", hierarchy_community)
    # print("epsilon_community_size = ", epsilon_community_size)
    # print("epsilon_max = ", epsilon_max)
    # # save files
    # g = nx.read_gpickle("./generate/Graph_gpickleSCS.gpickle")
    # # 1. leaf / sink / hub
    # # add an attribute to every node in each list, checking that the node exists
    # for node_list, node_type in zip([source, sink, iso, leaf], ["hub", "sink", "isolated", "leaf"]):
    #     for node in node_list:
    #         # check whether the node is present in the graph
    #         if node in g:
    #             g._node[node]["detect_node"] = node_type
    #         else:
    #             # otherwise print an error message
    #             print(f"Node {node} not found in graph.")
    # 2. small communities
    # for community_index, nodes in enumerate(community_nodes):
    #     for node in nodes:
    #         if g.has_node(node):
    #             g._node[node]["community"] = community_index
    # 3. hierarchy_community
    # walk hierarchy_community and hierarchy_nodes to set each node's attributes
    # for level, (communities, nodes) in enumerate(zip(hierarchy_community, hierarchy_nodes)):
    #     for community_id, community_nodes in zip(communities, nodes):
    #         # if community_id is a list, the communities need to be merged
    #         if isinstance(community_id, list):
    #             for sub_community in community_id:
    #                 for node in community_nodes:
    #                     g._node[node] = {'community_id': sub_community, 'hierarchy_level': level}
    #         else:
    #             for node in community_nodes:
    #                 g._node[node] = {'community_id': community_id, 'hierarchy_level': level}
    #         path = './generate/Graph_gml' + str(level) + '.gml'
    #         nx.write_gml(g, path)
    # nx.write_gml(g, './generate/my_Graph_gml.gml')
    # nx.write_gpickle(g, "./generate/Graph_gpickleSCS.gpickle")
if __name__ == '__main__':
    try:
        main_function()
    except Exception as error:
        print(str(error))
        print(traceback.format_exc())
        print("END ERROR")