from django.db import models
import os
import errno
import csv
# api.utils provides BASE_FILE_PATH and the status constants
# (OK, FAILED, FILE_ALREADY_EXIST, FILE_FAILED_CREATE_DIR, UNKNOWN_CONTENT).
from api.utils import *
import json
from random import randint

types = [
    ('csv', 'csv'),
]
usages = [
    ('input', 'input'),
    ('show', 'show'),
    ('result', 'result'),
    ('output', 'output'),
]
contents = [
    ('node', 'node'),
    ('edge', 'edge'),
]


class FileManager(models.Manager):
    def getHistory(self, user):
        # try:
        files = user.own_files.filter(usage="input").all()
        history = []
        for file in files:
            fileId = file.id
            directory = os.path.join(BASE_FILE_PATH, str(user.id))
            path = os.path.join(directory, str(fileId))
            try:
                size = os.path.getsize(path)
            except FileNotFoundError:
                print("File not found on disk, deleting its record", fileId, file.name)
                self.get(id=fileId).delete()
                continue
            except Exception as error:
                print("Unknown error while reading upload history", error)
                return FAILED
            if size >= 1024 * 1024:
                size = size / (1024 * 1024)
                size = f"{size:.2f} MB"
            else:
                size = size / 1024
                size = f"{size:.2f} KB"
            history.append({
                'id': file.id,
                'name': file.name,
                'uploadTime': file.update_time,
                'size': size,
                'content': file.content,
            })
        return history
        # except Exception as error:
        #     print("Failed to get upload history", error)
        #     return FAILED


# Create your models here.
class File(models.Model):
    name = models.CharField(default="untitled", max_length=64)
    type = models.CharField(choices=types, max_length=5)
    usage = models.CharField(choices=usages, max_length=20)
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)
    content = models.CharField(choices=contents, max_length=10)
    associate = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True)
    user = models.ForeignKey(to="api.User", on_delete=models.CASCADE, related_name='own_files')
    objects = FileManager()

    def saveWithInfo(self):
        # Count nodes/edges in the stored CSV file and record them in a FileInfo row.
        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
        if self.content in ['node', 'nodes']:
            sCount = dCount = iCount = 0
            nodeFile = csv.reader(open(path, 'r'))
            for line in nodeFile:
                if line[1] == 'S':
                    sCount += 1
                if line[1] == 'D':
                    dCount += 1
                if line[1] == 'I':
                    iCount += 1
            fileInfo = FileInfo()
            fileInfo.file = self
            fileInfo.nodes = sCount + dCount + iCount
            fileInfo.sNodes = sCount
            fileInfo.dNodes = dCount
            fileInfo.iNodes = iCount
            fileInfo.save()
        if self.content in ['edge', 'edges']:
            # Count the lines of the edge file itself.
            edges = 0
            edgeFile = csv.reader(open(path, 'r'))
            for line in edgeFile:
                edges += 1
            fileInfo = FileInfo()
            fileInfo.file = self
            fileInfo.edges = edges
            fileInfo.save()
        self.save()

    def generate(self, data):
        # Generate a CSV file from a JSON result.
        path = os.path.join(BASE_FILE_PATH, str(self.user.id))
        if os.path.exists(os.path.join(path, str(self.id))):
            self.delete()
            return FILE_ALREADY_EXIST
        else:
            if self.content == 'node':
                nodes = []
                file = open(os.path.join(path, str(self.id)), 'w', newline='')
                csvFile = csv.writer(file)
                for line in data:
                    if not str(line[0]).isdigit():
                        print("check file illegal failed", "node", "id wrong")
                        return FAILED
                    if not line[1] in ['S', 'D', 'I']:
                        print("check file illegal failed", "node", "type wrong")
                        return FAILED
                    if line[0] not in nodes:
                        nodes.append(line[0])
                    else:
                        print("check file illegal failed", "node", "duplicate id")
                        return FAILED
                    # Apart from the node id and node type, all remaining parameters stay in
                    # the rest of the line and are stored as JSON strings.
                    csvFile.writerow(line)
                file.close()
                return OK
            if self.content == 'edge':
                edges = []
                file = open(os.path.join(path, str(self.id)), 'w', newline='')
                csvFile = csv.writer(file)
                for line in data:
                    if not str(line[0]).isdigit() or not str(line[1]).isdigit():
                        print("check file illegal failed", "edge", "id wrong")
                        return FAILED
                    # Note that edges are treated as undirected by default.
                    # Skip duplicates: an edge and its reverse count as the same edge.
                    if [line[0], line[1]] not in edges and [line[1], line[0]] not in edges:
                        edges.append([line[0], line[1]])
                        # Extra parameters stay in the remaining positions of the line.
                        csvFile.writerow(line)
                file.close()
                return OK
            return UNKNOWN_CONTENT

    def storage(self, file):
        # Write an uploaded file (an iterable of byte chunks) to the user's directory.
        try:
            path = os.path.join(BASE_FILE_PATH, str(self.user.id))
            if os.path.exists(os.path.join(path, str(self.id))):
                self.delete()
                return FILE_ALREADY_EXIST
            else:
                try:
                    os.mkdir(path)
                except OSError as error:
                    # The directory already existing is fine; any other error is fatal.
                    if error.errno != errno.EEXIST:
                        print(error)
                        return FILE_FAILED_CREATE_DIR
                file_path = os.path.join(path, str(self.id))
                f = open(file_path, 'wb')
                for chunk in file:
                    f.write(chunk)
                f.close()
                return OK
        except Exception as error:
            print(error)
            return FAILED

    # Check whether the stored file is valid.
    def checkIllegal(self):
        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
        if self.associate:
            path2 = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.associate.id))
        else:
            path2 = None
        if self.content == 'node':
            file = csv.reader(open(path, 'r'))
            # Validation for CSV files.
            if self.type == 'csv':
                nodes = []
                for line in file:
                    if not len(line) >= 2:
                        print("check file illegal failed", "node", "len >= 2")
                        return False
                    if not line[0].isdigit():
                        print("check file illegal failed", "node", "id wrong")
                        return False
                    if not line[1] in ['S', 'D', 'I']:
                        print("check file illegal failed", "node", "type wrong")
                        return False
                    if line[0] not in nodes:
                        nodes.append(line[0])
                    else:
                        print("check file illegal failed", "node", "duplicate id")
                        return False
                return True
        if self.content == 'edge':
            edgeFile = csv.reader(open(path, 'r'))
            nodeFile = csv.reader(open(path2, 'r'))
            # Validation for CSV files.
            if self.type == 'csv':
                nodes = []
                edges = []
                for line in nodeFile:
                    if not len(line) >= 2:
                        print("check file illegal failed", "node", "len >= 2")
                        return False
                    if not line[0].isdigit():
                        print("check file illegal failed", "node", "id wrong")
                        return False
                    nodes.append(line[0])
                for line in edgeFile:
                    if not len(line) == 2:
                        print("check file illegal failed", "edge", "len = 2")
                        return False
                    if line[0] not in nodes or line[1] not in nodes:
                        print("check file illegal failed", "edge", "id not exist")
                        return False
                    if [line[0], line[1]] not in edges and [line[1], line[0]] not in edges:
                        edges.append([line[0], line[1]])
                    else:
                        # The graph is treated as undirected, so an edge and its reverse count as duplicates.
                        print("check file illegal failed", "edge", "duplicate edge")
                        return False
                return True

    def toJson(self):
        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
        file = csv.reader(open(path, 'r'))
        if self.content == 'node':
            if self.type == 'csv':
                nodes = []
                for line in file:
                    # Any extra data on the line goes into the 'meta' field.
                    node = {'id': line[0], 'type': line[1], 'meta': []}
                    for el in line[2:]:
                        node['meta'].append(json.loads(el))
                    # For testing: add an 'optimize' field.
                    el = '{"optimize": "old"}'
                    node['meta'].append(json.loads(el))
                    # For testing: add a 'group' field.
                    el = '{"group": "' + str(randint(1, 5)) + '"}'
                    node['meta'].append(json.loads(el))
                    nodes.append(node)
                return nodes
        if self.content == 'edge':
            if self.type == 'csv':
                edges = []
                for line in file:
                    # Any extra data on the line goes into the 'meta' field.
                    edge = {'from': line[0], 'to': line[1], 'meta': []}
                    for el in line[2:]:
                        edge['meta'].append(json.loads(el))
                    # For testing: add an 'optimize' field.
                    el = '{"optimize": "old"}'
                    edge['meta'].append(json.loads(el))
                    edges.append(edge)
                return edges

    def deleteStorage(self):
        path = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.id))
        if self.associate:
            path2 = os.path.join(os.path.join(BASE_FILE_PATH, str(self.user.id)), str(self.associate.id))
        else:
            path2 = ""
        failedFlag = False
        for p in [path, path2]:
            if os.path.exists(p):
                try:
                    os.remove(p)
                except Exception as error:
                    # Deletion can fail if the file is still in use.
                    print(f"Failed to delete file {self.id} {self.name}", error)
                    failedFlag = True
        # Delete the records whether or not file removal succeeded;
        # any leftover files can be removed in a later cleanup pass.
        if self.associate:
            self.associate.delete()
        self.delete()
        if failedFlag:
            return FAILED
        return OK

    class Meta:
        app_label = 'api'


class FileInfo(models.Model):
    file = models.OneToOneField(File, on_delete=models.CASCADE, related_name='own_file_info')
    nodes = models.IntegerField(default=0)
    sNodes = models.IntegerField(default=0)
    dNodes = models.IntegerField(default=0)
    iNodes = models.IntegerField(default=0)
    edges = models.IntegerField(default=0)
    # TODO: add edge information such as centrality.

    class Meta:
        app_label = 'api'
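
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the app): how a view might drive these
# models for an uploaded node/edge CSV pair. Names such as `request.user`,
# `uploaded_node`, and `uploaded_edge` are assumptions for the example only.
#
#   node_record = File(name="nodes.csv", type="csv", usage="input",
#                      content="node", user=request.user)
#   node_record.save()
#   node_record.storage(uploaded_node.chunks())
#
#   edge_record = File(name="edges.csv", type="csv", usage="input",
#                      content="edge", associate=node_record, user=request.user)
#   edge_record.save()
#   edge_record.storage(uploaded_edge.chunks())
#
#   if edge_record.checkIllegal():           # validates both CSV files
#       node_record.saveWithInfo()           # fills the FileInfo counters
#       edge_record.saveWithInfo()
#       payload = {"nodes": node_record.toJson(), "edges": edge_record.toJson()}
#   else:
#       edge_record.deleteStorage()          # rolls back files and records
# ---------------------------------------------------------------------------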