- import networkx as nx
- from networkx.algorithms import community # for community structure later (note: shadowed by the local 'community' list inside main_function)
- import collections
- from matplotlib import pyplot as plt
- from networkx.algorithms import approximation as app
- import operator
- # from networkx.generators.community import LFR_benchmark_graph
- import math
- import time
- from itertools import repeat
- import copy
- import pickle
- import random
- import numpy as np
- import pandas as pd
- from functools import reduce
- from scipy.special import comb, perm
- import os
- import csv
- import json
- import requests
- import traceback
- from itertools import combinations
- from collections import defaultdict
- from collections import deque
- from optparse import OptionParser
- ######################################
- # STEP 0: Initial graph ##
- ######################################
- # file = open('./generate/Graph_gpickleSCS.gpickle', 'rb')
- # Graph=pickle.load(file)
- ''' Prepare input data '''
- SCHEDULER_BASE_URL = os.getenv("SCHEDULER_BASE_URL")
- BACKEND_BASE_URL = os.getenv("BACKEND_BASE_URL")
- missionId = os.getenv("missionId")
- planId = os.getenv("planId")
- headers = {
- "Content-Type": "application/json", # 明确声明数据格式
- "Accept": "application/json" # 声明期望的响应格式
- }
- params = {
- "missionId": missionId,
- "planId": planId,
- }
- print("[output]", json.dumps({'msg': 'started'}), flush=True)
- response = requests.get(SCHEDULER_BASE_URL + '/fetchData', params=params, headers=headers)
- fetchedData = response.json()
- if not fetchedData:
- # Report the failure before exiting (message shape is an assumption, mirroring the other [output] events)
- print("[output]", json.dumps({'msg': 'error', 'data': 'no data fetched'}), flush=True)
- quit()
- # fetchedData: {'nodes': [], 'edges': []}
- ''' Input data ready '''
- # Data now comes from the Flask service instead of local files
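- # Assumed record shapes, inferred from the field accesses below (not from an API spec):
- #   node: {'id': 3, 'type': 's'|'d'|'i' (case-insensitive, upper-cased below), 'meta': [{...}, ...]}
- #   edge: {'from': 3, 'to': 7, 'meta': [...]}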
- input_nodes = []
- for line in fetchedData['nodes']:
- # Strip any stale 'group' entries from meta (iterate indices in reverse so deletion is safe)
- for meta in range(len(line['meta'])-1, -1, -1):
- if 'group' in line['meta'][meta]:
- del line['meta'][meta]
- input_nodes.append([int(line['id']), str(line['type']).upper()])
- input_edges = []
- for line in fetchedData['edges']:
- # Reset edge meta
- line['meta'] = []
- input_edges.append([int(line['from']), int(line['to'])])
- # Check node numbering: this program requires ids to start at 0
- zero_based = False
- for node in input_nodes:
- if int(node[0]) == 0:
- zero_based = True
- if not zero_based:
- # Ids do not start at 0 (assumed 1-based), so shift every node id down by one, and the edge endpoints with them
- for node in input_nodes:
- node[0] = int(node[0]) - 1
- # Also shift the ids in the original fetched data
- for node in fetchedData['nodes']:
- node['id'] = int(node['id']) - 1
- for edge in input_edges:
- edge[0] = int(edge[0]) - 1
- edge[1] = int(edge[1]) - 1
- for edge in fetchedData['edges']:
- edge['from'] = int(edge['from']) - 1
- edge['to'] = int(edge['to']) - 1
- # print("测试输出节点和边")
- # print(input_nodes)
- # print(input_edges)
- # file = open('nodes.csv', 'r')
- # idx = 0
- # for line in file:
- # input_nodes.append([idx, line.replace('\n', '')])
- # idx += 1
- # file.close()
- # file = open('edges.csv', 'r')
- # csvfile = csv.reader(file)
- # idx = 0
- # for line in csvfile:
- # input_edges.append([int(line[0]), int(line[1])])
-
- Graph = nx.Graph()
- for i in input_nodes:
- Graph.add_nodes_from([i[0]], type=i[1])
- for i in input_edges:
- #G.add_weighted_edges_from([(i,j,random.random())])
- Graph.add_edges_from([(i[0], i[1])])
- ###### Manually entered graph structure ###########
- '''
- Graph = nx.DiGraph()
- nodesfile = open('./nodes', 'r')
- for line in nodesfile:
- nodeline = line.replace('\n','').split(',')
- Graph.add_node(int(nodeline[0]), type = nodeline[1])
- edgefile = open('./edges', 'r')
- for line in edgefile:
- edgeline = line.replace('\n', '').split(',')
- Graph.add_edges_from([(int(edgeline[0]), int(edgeline[1]))])
- '''
- ###############################
-
-
- G = Graph.to_undirected() # undirected copy of the graph
- graphname = 'ori' + str(random.randint(10000, 99999))
- ## remove self-loops & degree-0 nodes
- # G.remove_edges_from(nx.selfloop_edges(G)) # ckb change; updated to the current nx.selfloop_edges API
- isola = [k for k in nx.isolates(G)]
- G.remove_nodes_from(isola)
- Dict_Centrality = nx.degree_centrality(G)
- Centrality = list(Dict_Centrality.values()) # degree centrality scores
- Name = list(Dict_Centrality.keys()) # node ids, index-aligned with Centrality
- A = nx.adjacency_matrix(G) # sparse adjacency matrix
- # Code for finding SDI (Sensor -> Decider -> Influencer) operation chains
- nodes=Graph.nodes
- li = np.array(nx.adjacency_matrix(Graph).todense()) # dense copy: the sparse format does not support li[i][j] indexing
- class GrfAllEdge():
- # Enumerates all start -> goal paths over the dense adjacency matrix by BFS; found chains land in SDIi
- def __init__(self, total):
- self.total = total # number of candidate node ids (assumed to be 0..total-1)
- self.li = li # module-level dense adjacency matrix
- self.SDIi = []
- def bfs_paths(self, start, goal, max_depth=5):
- if start == goal:
- return
- queue = deque([(start,[start])])
- while queue:
- current,path = queue.popleft()
- if len(path) > max_depth:
- continue
- # Type of the node at the end of the current path, used to forbid D -> S hops
- tp_current = Graph.nodes[path[-1]]['type']
- for next_node in range(self.total):
- if self.li[current][next_node] == 1 and next_node not in path:
- if len(path) >= max_depth:
- continue
- tp_next = Graph.nodes[next_node]['type']
- if tp_current == 'D' and tp_next == 'S':
- continue
- new_path = list(path) # copy the current path
- new_path.append(next_node) # extend it with the new node
- if next_node == goal: # reached the goal
- self.SDIi.append(new_path) # record a complete chain
- else:
- queue.append((next_node, new_path)) # enqueue the extended path
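- # Usage sketch (hypothetical toy graph: a 3-node chain S(0) - D(1) - I(2)):
- #   edge = GrfAllEdge(3)
- #   edge.bfs_paths(0, 2)
- #   edge.SDIi == [[0, 1, 2]] # the single S -> D -> I operation chain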
- def get_oc(node):
- # Partition the given node ids by type (S: sensor, D: decider, I: influencer)
- SensSet, DeciSet, InfluSet = [], [], []
- for index in node: # iterate the actual node ids, not range(len(node)): the list may be a community subset
- tps = Graph.nodes[index]['type']
- if tps == 'S':
- SensSet.append(index)
- elif tps == 'D':
- DeciSet.append(index)
- elif tps == 'I':
- InfluSet.append(index)
- # Collect all operation chains (OC) by BFS from every sensor to every influencer
- OC_ALL = []
- for orig in SensSet: # sensor nodes
- for goal in InfluSet: # influencer nodes
- edge = GrfAllEdge(len(li)) # scan the full id range: community member ids are global graph ids
- edge.bfs_paths(orig, goal)
- OC_ALL.extend(edge.SDIi)
- return OC_ALL
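- # e.g. get_oc([0, 1, 2]) on the toy graph above returns [[0, 1, 2]]; each entry is one S -> ... -> I path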
- # Extract the node ids from a community (members are (node_id, join_time) tuples)
- def get_community_nodes(community):
- return [node[0] for node in community]
- def get_communities_oc(communities):
- communities_oc = {}
- for community_name, community_nodes in communities.items():
- community_nodes_ids = get_community_nodes(community_nodes)
- communities_oc[community_name] = get_oc(community_nodes_ids)
- return communities_oc
- # Convert the matrix of shared-chain counts into a matrix of relative ranks
- def transform_matrix(matrix):
- # Shape and flattened view of the matrix
- rows, cols = matrix.shape
- flat_matrix = matrix.flatten()
- # Non-zero elements and their indices
- non_zero_indices = np.where(flat_matrix != 0)[0]
- non_zero_elements = flat_matrix[non_zero_indices]
- # Unique values and inverse indices, used to build the ranking
- unique_values, inverse_indices = np.unique(-non_zero_elements, return_inverse=True)
- # Rank the unique values; the negation above yields descending order (largest count -> rank 1)
- ranks = np.zeros_like(non_zero_elements, dtype=int)
- for i in range(len(unique_values)):
- ranks[inverse_indices == i] = i + 1
- # Map the ranks back onto the original matrix positions
- ranked_non_zero_elements = np.zeros_like(flat_matrix, dtype=int)
- ranked_non_zero_elements[non_zero_indices] = ranks
- # Zero entries get the worst rank (max rank plus one)
- max_rank = np.max(ranks)
- ranked_non_zero_elements[ranked_non_zero_elements == 0] = max_rank + 1
- # Restore the original shape and zero the diagonal
- ranked_matrix = ranked_non_zero_elements.reshape(rows, cols)
- np.fill_diagonal(ranked_matrix, 0)
- return ranked_matrix
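- # Worked example (illustrative):
- #   transform_matrix(np.array([[0, 5, 2], [5, 0, 0], [2, 0, 0]]))
- #   -> [[0, 1, 2], [1, 0, 3], [2, 3, 0]] (5 -> rank 1, 2 -> rank 2, zeros -> 3, diagonal forced to 0)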
- # Shrink the operation-chain matrix by merging rows/columns ii and jj into a single community
- def Matrix_shrink_oc(oc_temp, ii, jj):
- k1 = oc_temp[:, ii]
- k2 = oc_temp[:, jj]
- dd = np.delete(oc_temp, [ii, jj], 1) # drop the two merged columns
- dd = np.delete(dd, [ii, jj], 0) # drop the two merged rows
- kk = np.maximum(k1, k2) # the merged community keeps the stronger link to every other one
- kk = np.delete(kk, [ii, jj], 0)
- m1 = np.vstack([dd, kk])
- m2 = np.append(kk, 0)
- shrank = np.vstack([m1.T, m2]) # re-attach the merged row/column; the new diagonal entry is 0
- return shrank
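- # Illustrative check: merging communities 0 and 1 of [[0, 1, 2], [1, 0, 3], [2, 3, 0]]
- #   Matrix_shrink_oc(m, 0, 1) -> [[0, 3], [3, 0]] (the stronger of the links 2 and 3 survives)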
- def main_function():
- ######################################
- # STEP 1: Identification of sources ##
- ######################################
- # print('##### STEP 1 #####')
- # print('--------------------')
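- # Node classification below (by degree centrality relative to neighbours), roughly:
- #   leaf: degree 1; source: >= all neighbours (plus one elected hub per equal-centrality clique);
- #   sink: < all neighbours with degree > 1; iso: ties all neighbours but loses the hub election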
- start_s1 = time.perf_counter()
- source = []
- sink = []
- iso = []
- leaf = []
- nodetemp = list(G.nodes)
- count_s1 = 0 # count nodes
- for i in nodetemp:
- count_s1 += 1
- # if count_s1 % 1000 == 0: # calculate time per 1000 nodes
- # print('Time Elapsed--- ' + str((time.perf_counter() - start_s1)) + ' Node:' + str(count_s1) + '/' + str(
- # len(G)) + '\n')
- nei = list(G.neighbors(i))
- iso_count = 0
- source_count = 0
- sink_count = 0
- if len(nei) == 1: # leaf
- leaf.append(i)
- continue
- for ii in nei: # tally centrality comparisons against each neighbour
- '''
- node > neighbour: source_count++
- node == neighbour: iso_count++ (ties also bump source_count)
- node < neighbour: sink_count++
- '''
- if Dict_Centrality.get(i) > Dict_Centrality.get(ii):
- source_count += 1
- elif Dict_Centrality.get(i) == Dict_Centrality.get(ii):
- iso_count += 1
- source_count += 1 # ties count toward source too, feeding the clique-hub election below
- else:
- sink_count += 1
- continue
- if iso_count == G.degree(i): # ties with every neighbour
- if all(Centrality[Name.index(p)] == Centrality[Name.index(i)] for p in list(G.neighbors(i))): # clique
- if not any(w in source for w in list(G.neighbors(i))): # order-dependent: the first clique member seen wins
- source.append(i) # elect this node as hub; the rest stay inner members
- Centrality[Name.index(i)] += 0.5 # additive bonus marks the hub
- else:
- iso.append(i) # not a pure clique (or its hub already claimed the bonus)
- if source_count == G.degree(i):
- if i not in iso and i not in source: # source: centrality >= every neighbour
- source.append(i)
- if sink_count == G.degree(i) and G.degree(i) > 1: # 'and', not bitwise '&'
- sink.append(i)
-
- # Step 1 done: report 20% progress
- print("[output]", json.dumps({'msg': 'progress', 'data': 20}), flush=True)
- r_source = len(source) / len(G) # proportion of source
- r_sink = len(sink) / len(G) # proportion of sink
- inner = len(G) - len(source) - len(sink) - len(iso) - len(leaf)
- #############################################################
- # STEP 2: Propagation and Formulation of Local Communities ##
- #############################################################
- # print('##### STEP 2 #####')
- # print('--------------------')
- start_s2 = time.perf_counter()
- History = [[] for _ in range(len(G))] # per-node list of (source_label, time) records
- community = [[] for _ in range(len(source))] # per-source community: list of (node, join_time)
- t = 0
- tmax = 100
- time_record = []
- for i in range(len(source)):
- community[i].append((source[i], t)) # first label, first contagion time
- History[Name.index(source[i])] = [(source[i], 0)]
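- # Propagation model: at every step, each node that joined a community in the previous round
- # recruits its strictly lower-centrality neighbours; a node can appear in several communities
- # (History records every label it receives), and the loop stops at tmax or when a full round
- # changes nothing.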
- while t < tmax:
- if t % 10 == 0 and t > 0:
- print("[output]", json.dumps({'msg': 'progress', 'data': int(20 + t / tmax * 30)}), flush=True)
- old_community = copy.deepcopy(community)
- old_history = copy.deepcopy(History)
- t = t + 1
- for i in range(len(source)): # all propagation happens at the same time
- # if (i + 1) % 100 == 0:
- # print('Iteration:' + str(t) + '/' + str(tmax) + '---' + 'Source:' + str(i + 1) + '/' + str(
- # len(source)) + '---Time Elapsed---' + str(
- # (time.perf_counter() - start_s2)) + '---CommunitySize---' + str(len(community[i])))
- for j in community[i]:
- if j[1] == t - 1: # newly join the community from last round propagation
- for s in G.neighbors(j[0]):
- if Centrality[Name.index(s)] < Centrality[Name.index(j[0])]:
- if s not in [k[0] for k in community[i]]:
- community[i].append((s, t))
- History[Name.index(s)].append((source[i], t))
- time_record.append((time.perf_counter() - start_s2))
- if old_community == community or old_history == History: # no change in History or community membership
- break
- # check History and community are consistent #
- if sum(len(History[i]) for i in range(len(History))) != sum(len(community[i]) for i in range(len(community))):
- print("WRONG! COMMUNITY AND HISTORY DON'T MATCH!")
- ave_membership = sum(len(History[i]) for i in range(len(History))) / len(History) # mh
- ave_size = sum(len(community[i]) for i in range(len(community))) / len(community) # mx
- # mh = len(S)/N * mx ?
- elapsed = (time.perf_counter() - start_s2)
- # plot local communities #
- from matplotlib import colors as mcolors
- colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
- old_co = list(community)
- old_his = list(History)
- len_hist = [len(hh) for hh in History]
- r_crossover = len(len_hist) - len_hist.count(1)
- ###############################################
- # STEP 3&4: Aggregation of Small Communities ##
- ###############################################
- # print('##### STEP 3&4 #####')
- # print('--------------------')
- start_s3 = time.perf_counter()
- #save chain
- # import os
- # current_path = os.path.dirname(os.path.abspath(__file__))
- # file_path = os.path.join(current_path, "oc.txt")
- #write
- # OC_ALL = get_oc(nodes)
- # with open(file_path,"w") as file:
- # for path in OC_ALL:
- # if path:
- # file.write(",".join(map(str,path)) + "\n")
- #read chain
- # with open(file_path, "r") as file:
- # OC_ALL = [list(map(int, line.strip().split(','))) for line in file]
- # Get the operation chains inside each community
- community_dict = {}
- for comm in community:
- community_name = comm[0][0] # a community is keyed by its source node
- community_dict[community_name] = comm
- # print(community_dict)
- # Note: community_dict is what stores the functional-group structure
- # No longer saved to community.csv; it is passed straight back to the Flask service instead
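- # Illustrative shape (hypothetical ids): community_dict == {4: [(4, 0), (7, 1), (2, 2)], ...}
- #   key: source node id; value: (member_id, join_time) pairs, join_time 0 being the source itself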
- # community_file = open('community.csv', 'w', newline='')
- # community_csv = csv.writer(community_file)
- # for community_source_node in community_dict:
- # for member_node in community_dict[community_source_node]:
- # community_csv.writerow([member_node[0], member_node[1], community_source_node])
- # community_file.close()
- # print("SOURCE", source)
- print("[output]", json.dumps({'msg': 'progress', 'data': 80}), flush=True)
- try:
- source_oc = get_communities_oc(community_dict)
- # Build the shared-chain matrix: entry (i, j) counts the operation chains common to communities i and j
- OC_source = np.zeros((len(source), len(source)))
- for i, community_id_1 in enumerate(source):
- for j, community_id_2 in enumerate(source[i+1:], i+1): # iterate the upper triangle only
- chains_1 = set(map(tuple, source_oc[community_id_1]))
- chains_2 = set(map(tuple, source_oc[community_id_2]))
- shared_chains = chains_1.intersection(chains_2)
- # if i == 3 and j == 4:
- # print(shared_chains)
- shared_count = len(shared_chains)
- OC_source[i][j] += shared_count
- OC_source[j][i] += shared_count # mirror to the lower triangle by symmetry
- except Exception as error:
- print(traceback.format_exc(), flush=True) # print the full traceback, not just the message
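- # Illustrative outcome: with three communities where 0 and 1 share 5 chains and 0 and 2 share 2,
- # OC_source == [[0, 5, 2], [5, 0, 0], [2, 0, 0]] (the example input shown for transform_matrix above)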
- # print(OC_source)
- # for i in range(len(source)):
- # for j in range(len(source)):
- # if i == j:
- # continue
- # else:
- # shared_oc = set(source_oc[i]).intersection(source_oc[j])
- # OC_source[i][j] += len(shared_oc)
- # print(OC_source)
- # OC_source_new = transform_matrix(OC_source).astype(int)
- # # print(OC_source_new)
- # #epsilon
- # epsilon_max = int(OC_source_new.max())
- # hierarchy_community = [list(source)]
- # epsilon_community_size = [(len(OC_source_new), 0)]
- # oc_temp = OC_source_new
- # oc_record = [list(oc_temp)]
- # phi_list = [] ## list of phi-epsilon
- # phi_ref_list = [] ## list of reference phi-epsilon
- # print("[output]", json.dumps({'msg': 'progress', 'data': 90}), flush=True)
- # for l in range(1,epsilon_max + 1):
- # # print('Epsilon:' + str(l) + '/' + str(epsilon_max) + '---' + 'Time Elapsed:' + str((time.perf_counter() - start_s3)))
- # temp = list(hierarchy_community[-1])
- # merging_count = 0 # count of num of merging (in each epsilon)
- # while True:
- # ij = np.argwhere(oc_temp == l) # Note: l starts from 1
- # # print("Ep = ",str(l),"ij = ",ij)
- # if len(ij) == 0: # no element == l
- # break
- # merging_count += 1
- # #change
- # rand_index = np.random.choice(len(ij))
- # ii, jj = ij[rand_index]
- # # ii = ij[0][0]
- # # jj = ij[0][1]
- # if type(temp[ii]) != list: # str to list
- # temp[ii] = [temp[ii]]
- # if type(temp[jj]) != list: # str to list
- # temp[jj] = [temp[jj]]
- # temp_com = temp[ii] + temp[jj] #merge community
- # tempp = [temp[ii],temp[jj]]
- # tempp_copy = list(tempp)
- # # print("--------------------")
- # # print("temp = ", temp, " Ep = ", str(l))
- # # print("temp[ii] = ",temp[ii]," temp[jj] = ",temp[jj]," temp_com = ",temp_com," tempp = ",tempp," temp_copy = ",tempp_copy)
- # # print("--------------------")
- # if len(temp[ii]) == 1:
- # tempp_copy[0] = temp[ii][0]
- # if len(temp[jj]) == 1:
- # tempp_copy[1] = temp[jj][0]
- # #merge community
- # temp.remove(tempp[0]) # remove old small community 1
- # temp.remove(tempp[1]) # remove old small community 2
- # temp.append(temp_com)
- # #shrink oc_matrix
- # oc_temp = Matrix_shrink_oc(oc_temp,ii,jj)
- # # print("oc_temp = ")
- # # print(oc_temp)
- # oc_record.append(oc_temp)
- # # jac_record.append(jac_temp)
- # hierarchy_community.append(temp)
- # epsilon_community_size.append((len(oc_temp),l+1))
- # print("hierarchy_community = ",hierarchy_community)
- # Note: hierarchy_community stores the merge hierarchy of the communities, but it cannot be used downstream yet
- ## unconnected components ## i think oc_bad can merge
- # if len(np.argwhere(oc_temp == int(OC_source_new.max()))) == len(oc_temp)*(len(oc_temp)-1):#int(OC_source_new.max())is dummy
- # break
- # Prepare the data reported back to the Flask service
- result = {
- 'missionId': missionId,
- 'planId': planId,
- 'progress': 100,
- 'nodes': [],
- 'edges': [],
- }
- # Put the fetched nodes and edges straight into the result
- result['nodes'] = fetchedData['nodes'].copy()
- # Drop any stale group info that may remain
- for n in result['nodes']:
- for meta_index in range(len(n['meta'])-1, -1, -1):
- if 'group' in n['meta'][meta_index]:
- del n['meta'][meta_index]
- result['edges'] = fetchedData['edges'].copy()
- # print(result['edges'])
- # print(result['nodes'])
- # Map each functional-group index to its core (source) node
- groups = {}
- group_index = 0
- for leader in community_dict:
- groups[group_index] = leader
- for group_node in community_dict[leader]:
- # Tag the node's meta with the functional group it belongs to
- node = next(n for n in result['nodes'] if int(n['id']) == int(group_node[0]))
- for dicts in node['meta']:
- if not isinstance(dicts, dict):
- print("ERROR, WRONG META", node['meta'])
- raise ValueError("ERROR, WRONG META")
- node['meta'].append({
- 'group': group_index,
- })
- group_index += 1
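- # After tagging, a member node might look like (illustrative):
- #   {'id': 7, 'type': 'S', 'meta': [{...original entries...}, {'group': 0}]}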
-
-
- print("[output]", json.dumps({'msg': 'result', 'data': result}), flush=True)
- ## refine hierarchy_community 0 ##
- # for i in range(len(hierarchy_community[0])):
- # hierarchy_community[0][i] = [(hierarchy_community[0][i])]
- #get hierarchy_nodes
- # com_node_dic = {}
- # for i in range(len(source)):
- # com_node_dic[source[i]] = community_nodes[i]
- # hierarchy_nodes = []
- # for hel_com in hierarchy_community: # for each level of the hierarchy
- # level_temp = []
- # for i in hel_com: # for each community within the level
- # if not isinstance(i,list): # not a merged (small) community
- # nodes_all = set(com_node_dic[i]) if isinstance(com_node_dic[i], (list, set, tuple)) else set([com_node_dic[i]])
- # level_temp.append(nodes_all)
- # else:
- # nodes_all = set()
- # for j in i:
- # nodes_all.update(com_node_dic[j] if isinstance(com_node_dic[j], (list, set, tuple)) else [com_node_dic[j]])
- # level_temp.append(nodes_all)
- # hierarchy_nodes.append(level_temp)
- #
- # nodetemp = list(G.nodes)
- # sensors = 0 # sensor nodes
- # deciders = 0 # decider nodes
- # influencer = 0 # influencer nodes
- # for i in range(len(nodetemp)):
- # tps = G._node[i]['type']
- # if tps == 'S':
- # sensors = sensors + 1
- # elif tps == 'D':
- # deciders = deciders + 1
- # else:
- # influencer = influencer + 1
- # print("Num of node S:" + str(sensors))
- # print("Num of node D:" + str(deciders))
- # print("Num of node I:" + str(influencer))
- # print('Num of nodes:'+ str(len(G.nodes)))
- # print('Num of edges:'+ str(len(G.edges)))
- # print('Num of operation chains:'+ str(len(OC_ALL)))
- # print('Num of sources:'+ str(len(source)))
- # print('Num of sinks:'+ str(len(sink)))
- # print('Num of isolated nodes:'+ str(len(iso)))
- # print('Num of leaf nodes:'+ str(len(leaf)))
- # print('Num of inner members:'+ str(inner))
- # print("hierarchy_community = ",hierarchy_community)
- # print("epsilon_community_size = ",epsilon_community_size)
- # print("epsilon_max = ",epsilon_max)
-
-
- # # save files
- # g = nx.read_gpickle("./generate/Graph_gpickleSCS.gpickle")
- # # #1.leaf sink hub
- # # Add an attribute to every node in each list, checking that the node exists
- # for node_list, node_type in zip([source, sink, iso, leaf], ["hub", "sink", "isolated", "leaf"]):
- # for node in node_list:
- # # Check whether the node exists in the graph
- # if node in g:
- # g._node[node]["detect_node"] = node_type
- # else:
- # # If the node does not exist, print an error message
- # print(f"Node {node} not found in graph.")
- #2.small community
- # for community_index, nodes in enumerate(community_nodes):
- # for node in nodes:
- # if g.has_node(node):
- # g._node[node]["community"] = community_index
- #3.hierarchy_community
- # Walk hierarchy_community and hierarchy_nodes to set each node's attributes
- # for level, (communities, nodes) in enumerate(zip(hierarchy_community, hierarchy_nodes)):
- # for community_id, community_nodes in zip(communities, nodes):
- # # If community_id is a list, those communities need to be merged
- # if isinstance(community_id, list):
- # for sub_community in community_id:
- # for node in community_nodes:
- # g._node[node] = {'community_id': sub_community, 'hierarchy_level': level}
- # else:
- # for node in community_nodes:
- # g._node[node] = {'community_id': community_id, 'hierarchy_level': level}
- # path = './generate/Graph_gml' + str(level)+'.gml'
- # nx.write_gml(g, path)
-
- # nx.write_gml(g, './generate/my_Graph_gml.gml')
- # nx.write_gpickle(g, "./generate/Graph_gpickleSCS.gpickle")
- if __name__ == '__main__':
- try:
- main_function()
- except Exception as error:
- print(str(error), flush=True)
- print(traceback.format_exc(), flush=True)
- print("END ERROR", flush=True)
|