Sjim 2 years ago
parent
commit
5a47b5b0f6
64 changed files with 1817 additions and 388 deletions
  1. File/outbuf.py  +118 -0
  2. File/utils.py  +891 -1
  3. Target/File/checkpoint_4.py  +0 -3
  4. Target/File/checkpoint_7.py  +0 -2
  5. Target/File/utils_10.py  +46 -0
  6. Target/File/utils_11.py  +7 -0
  7. Target/File/utils_12.py  +7 -0
  8. Target/File/utils_13.py  +9 -0
  9. Target/File/utils_14.py  +39 -0
  10. Target/File/utils_15.py  +15 -0
  11. Target/File/utils_16.py  +23 -0
  12. Target/File/utils_17.py  +71 -0
  13. Target/File/utils_18.py  +20 -0
  14. Target/File/utils_19.py  +20 -0
  15. Target/File/utils_20.py  +33 -0
  16. Target/File/utils_21.py  +5 -0
  17. Target/File/utils_22.py  +11 -0
  18. Target/File/utils_23.py  +20 -0
  19. Target/File/utils_24.py  +50 -0
  20. Target/File/utils_25.py  +4 -0
  21. Target/File/utils_26.py  +3 -0
  22. Target/File/utils_27.py  +23 -0
  23. Target/File/utils_28.py  +55 -0
  24. Target/File/utils_29.py  +42 -0
  25. Target/File/utils_3.py  +3 -0
  26. Target/File/utils_30.py  +42 -0
  27. Target/File/utils_31.py  +6 -0
  28. Target/File/utils_32.py  +4 -0
  29. Target/File/utils_33.py  +23 -0
  30. Target/File/utils_34.py  +21 -0
  31. Target/File/utils_35.py  +12 -0
  32. Target/File/utils_36.py  +29 -0
  33. Target/File/utils_37.py  +24 -0
  34. Target/File/utils_38.py  +20 -0
  35. Target/File/utils_4.py  +8 -0
  36. Target/File/utils_5.py  +7 -0
  37. Target/File/utils_6.py  +9 -0
  38. Target/File/utils_7.py  +6 -0
  39. Target/File/utils_8.py  +44 -0
  40. Target/File/utils_9.py  +46 -0
  41. Target/Hash/EncrypC_3.py  +0 -26
  42. Target/Hash/EncryptionDecryption_2.py  +0 -20
  43. Target/Hash/base64_2.py  +0 -10
  44. Target/Hash/base64_3.py  +0 -14
  45. Target/Hash/base64_4.py  +0 -5
  46. Target/Hash/base64_5.py  +0 -3
  47. Target/Hash/biometry_hash_5.py  +0 -69
  48. Target/Hash/biometry_hash_8.py  +0 -27
  49. Target/Hash/crypto_4.py  +0 -12
  50. Target/Hash/crypto_6.py  +0 -2
  51. Target/Hash/crypto_7.py  +0 -2
  52. Target/Hash/dirist_14.py  +0 -7
  53. Target/Hash/dirist_15.py  +0 -11
  54. Target/Hash/hash_1.py  +0 -4
  55. Target/Hash/hash_2.py  +0 -4
  56. Target/Hash/hash_3.py  +0 -21
  57. Target/Hash/md5_encryption_1.py  +0 -27
  58. Target/Hash/simple-hash_2.py  +0 -14
  59. Target/Pseudonym/anonymize_3.py  +0 -23
  60. Target/Pseudonym/anonymize_5.py  +0 -13
  61. Target/Pseudonym/anonymize_6.py  +0 -42
  62. Target/Pseudonym/dataFrameAnonymizer_3.py  +0 -20
  63. Target/Pseudonym/pseudodepseudonimizer_2.py  +0 -5
  64. utils/splitfile.py  +1 -1

+ 118 - 0
File/outbuf.py

@@ -0,0 +1,118 @@
+#!/usr/bin/python -u
+import sys
+import libxml2
+import StringIO
+
+
+def testSimpleBufferWrites():
+    f = StringIO.StringIO()
+    buf = libxml2.createOutputBuffer(f, "ISO-8859-1")
+    buf.write(3, "foo")
+    buf.writeString("bar")
+    buf.close()
+
+    if f.getvalue() != "foobar":
+        print("Failed to save to StringIO")
+        sys.exit(1)
+
+
+def testSaveDocToBuffer():
+    """
+    Regression test for bug #154294.
+    """
+    input = '<foo>Hello</foo>'
+    expected = '''\
+<?xml version="1.0" encoding="UTF-8"?>
+<foo>Hello</foo>
+'''
+    f = StringIO.StringIO()
+    buf = libxml2.createOutputBuffer(f, 'UTF-8')
+    doc = libxml2.parseDoc(input)
+    doc.saveFileTo(buf, 'UTF-8')
+    if f.getvalue() != expected:
+        print('xmlDoc.saveFileTo() call failed.')
+        print('     got: %s' % repr(f.getvalue()))
+        print('expected: %s' % repr(expected))
+        sys.exit(1)
+    doc.freeDoc()
+
+
+
+def testSaveFormattedDocToBuffer():
+    input = '<outer><inner>Some text</inner><inner/></outer>'
+    # The formatted and non-formatted versions of the output.
+    expected = ('''\
+<?xml version="1.0" encoding="UTF-8"?>
+<outer><inner>Some text</inner><inner/></outer>
+''', '''\
+<?xml version="1.0" encoding="UTF-8"?>
+<outer>
+  <inner>Some text</inner>
+  <inner/>
+</outer>
+''')
+    doc = libxml2.parseDoc(input)
+    for i in (0, 1):
+        f = StringIO.StringIO()
+        buf = libxml2.createOutputBuffer(f, 'UTF-8')
+        doc.saveFormatFileTo(buf, 'UTF-8', i)
+        if f.getvalue() != expected[i]:
+            print('xmlDoc.saveFormatFileTo() call failed.')
+            print('     got: %s' % repr(f.getvalue()))
+            print('expected: %s' % repr(expected[i]))
+            sys.exit(1)
+    doc.freeDoc()
+
+
+def testSaveIntoOutputBuffer():
+    """
+    Similar to the previous two tests, except this time we invoke the save
+    methods on the output buffer object and pass in an XML node object.
+    """
+    input = '<foo>Hello</foo>'
+    expected = '''\
+<?xml version="1.0" encoding="UTF-8"?>
+<foo>Hello</foo>
+'''
+    f = StringIO.StringIO()
+    doc = libxml2.parseDoc(input)
+    buf = libxml2.createOutputBuffer(f, 'UTF-8')
+    buf.saveFileTo(doc, 'UTF-8')
+    if f.getvalue() != expected:
+        print('outputBuffer.saveFileTo() call failed.')
+        print('     got: %s' % repr(f.getvalue()))
+        print('expected: %s' % repr(expected))
+        sys.exit(1)
+    f = StringIO.StringIO()
+    buf = libxml2.createOutputBuffer(f, 'UTF-8')
+    buf.saveFormatFileTo(doc, 'UTF-8', 1)
+    if f.getvalue() != expected:
+        print('outputBuffer.saveFormatFileTo() call failed.')
+        print('     got: %s' % repr(f.getvalue()))
+        print('expected: %s' % repr(expected))
+        sys.exit(1)
+    doc.freeDoc()
+
+
+if __name__ == '__main__':
+    # Memory debug specific
+    libxml2.debugMemory(1)
+
+    testSimpleBufferWrites()
+    testSaveDocToBuffer()
+    testSaveFormattedDocToBuffer()
+    testSaveIntoOutputBuffer()
+
+    libxml2.cleanupParser()
+    if libxml2.debugMemory(1) == 0:
+        print("OK")
+    else:
+        print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
+        libxml2.dumpMemory()

+ 891 - 1
File/utils.py

@@ -31,4 +31,894 @@ def save_task_checkpoint(file_path, task_num):
        task_num (int): Number of task increment.
    """
    save_path = os.path.join(file_path, 'checkpoint_task_' + str(task_num) + '.pth.tar')
-    shutil.copyfile(os.path.join(file_path, 'checkpoint.pth.tar'), save_path)
+    shutil.copyfile(os.path.join(file_path, 'checkpoint.pth.tar'), save_path)
+
+
+def pickle_dump(item, out_file):
+    with open(out_file, "wb") as opened_file:
+        pickle.dump(item, opened_file)
+
+
+def write_to_clf(clf_data, save_file):
+    # Save dataset for text classification to file.
+    """
+    clf_data: List[List[str]] [[text1, label1],[text2,label2]...]
+    file format: tsv, row: text + tab + label
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join(str(r) for r in row) for row in clf_data]))
+
+
+def write_to_seq2seq(seq_data, save_file):
+    """
+    clf_data: List[List[str]] [[src1, tgt1],[src2,tgt2]...]
+    file format: tsv, row: src + tab + tgt
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join([str(r) for r in row]) for row in seq_data]))
+
+
+def write_to_ner(cls, ner_data, save_file):
+    """
+    :param cls:
+    :param ner_data:
+    :param save_file:
+    :return:
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join(str(r) for r in row) for row in ner_data]))
+
+
+def quick_save(self, model, save_name, optimizer=None):
+    save_path = os.path.join(self.save_dir, save_name + '_weights.pth')
+    if optimizer:
+        opt_weights = optimizer.get_weights()
+        np.save(os.path.join(self.save_dir, save_name + '_opt_weights'), opt_weights)
+    model.save_weights(save_path, save_format='h5')
+
+
+def save(self, model, iter_nb, train_metrics_values, test_metrics_values, tasks_weights=[], optimizer=None):
+    self.logs_dict['train'][str(iter_nb)] = {}
+    self.logs_dict['val'][str(iter_nb)] = {}
+    for k in range(len(self.metrics)):
+        self.logs_dict['train'][str(iter_nb)][self.metrics[k]] = float(train_metrics_values[k])
+        self.logs_dict['val'][str(iter_nb)][self.metrics[k]] = float(test_metrics_values[k])
+
+    if len(tasks_weights) > 0:
+        for k in range(len(tasks_weights)):
+            self.logs_dict['val'][str(iter_nb)]['weight_' + str(k)] = tasks_weights[k]
+
+    with open(self.logs_file, 'w') as f:
+        json.dump(self.logs_dict, f)
+
+    ckpt = {
+        'model_state_dict': model.state_dict(),
+        'iter_nb': iter_nb,
+    }
+    if optimizer:
+        ckpt['optimizer_state_dict'] = optimizer.state_dict()
+
+    # Saves best miou score if reached
+    if 'MEAN_IOU' in self.metrics:
+        miou = float(test_metrics_values[self.metrics.index('MEAN_IOU')])
+        if miou > self.best_miou and iter_nb > 0:
+            print('Best miou. Saving it.')
+            torch.save(ckpt, self.best_miou_weights_file)
+            self.best_miou = miou
+            self.config_dict['best_miou'] = self.best_miou
+    # Saves best relative error if reached
+    if 'REL_ERR' in self.metrics:
+        rel_error = float(test_metrics_values[self.metrics.index('REL_ERR')])
+        if rel_error < self.best_rel_error and iter_nb > 0:
+            print('Best rel error. Saving it.')
+            torch.save(ckpt, self.best_rel_error_weights_file)
+            self.best_rel_error = rel_error
+            self.config_dict['best_rel_error'] = self.best_rel_error
+
+    # Saves last checkpoint
+    torch.save(ckpt, self.last_checkpoint_weights_file)
+    self.iter_nb = iter_nb
+    self.config_dict['iter'] = self.iter_nb
+    with open(self.config_file, 'w') as f:
+        json.dump(self.config_dict, f)
+
+
+def extract_spec(dataset='train'):
+    f = open(data_path + dataset + '_list.txt', 'r')
+
+    i = 0
+    for file_name in f:
+        i = i + 1
+        if not (i % 10):
+            print(i)
+
+        # load audio file
+        file_name = file_name.rstrip('\n')
+        file_path = data_path + file_name
+        # print file_path
+        y0, sr = librosa.load(file_path, sr=22050)
+        # we use first 1 second
+        half = len(y0) / 4
+        y = y0[:round(half)]
+        # mfcc
+        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=MFCC_DIM)
+        # delta mfcc and double delta
+        delta_mfcc = librosa.feature.delta(mfcc)
+        ddelta_mfcc = librosa.feature.delta(mfcc, order=2)
+
+        # STFT
+        D = np.abs(librosa.core.stft(y, hop_length=512, n_fft=1024, win_length=1024))
+        D_dB = librosa.amplitude_to_db(D, ref=np.max)
+
+        # mel spectrogram
+        mel_S = librosa.feature.melspectrogram(S=D, sr=sr, n_mels=128)
+        S_dB = librosa.power_to_db(mel_S, ref=np.max)  # log compression
+
+        # spectral centroid
+        spec_centroid = librosa.feature.spectral_centroid(S=D)
+
+        # concatenate all features
+        features = np.concatenate([mfcc, delta_mfcc, ddelta_mfcc, spec_centroid], axis=0)
+
+        # save mfcc as a file
+        file_name = file_name.replace('.wav', '.npy')
+        save_file = spec_path + file_name
+
+        if not os.path.exists(os.path.dirname(save_file)):
+            os.makedirs(os.path.dirname(save_file))
+        np.save(save_file, features)
+
+    f.close()
+
+
+def extract_codebook(dataset='train'):
+    f = open(data_path + dataset + '_list.txt', 'r')
+    i = 0
+    for file_name in f:
+        i = i + 1
+        if not (i % 10):
+            print(i)
+        # load audio file
+        file_name = file_name.rstrip('\n')
+        file_path = data_path + file_name
+        # #print file_path
+        y0, sr = librosa.load(file_path, sr=22050)
+        # we use first 1 second
+        half = len(y0) / 4
+        y = y0[:round(half)]
+        # STFT
+        S_full, phase = librosa.magphase(librosa.stft(y, n_fft=1024, window='hann', hop_length=256, win_length=1024))
+        n = len(y)
+
+        # Check the shape of matrix: row must corresponds to the example index !!!
+        X = S_full.T
+
+        # codebook by using K-Means Clustering
+        K = 20
+        kmeans = KMeans(n_clusters=K, random_state=0).fit(X)
+        features_kmeans = np.zeros(X.shape[0])
+        # for each sample, summarize feature!!!
+        codebook = np.zeros(K)
+        for sample in range(X.shape[0]):
+            features_kmeans[sample] = kmeans.labels_[sample]
+
+        # codebook histogram!
+        unique, counts = np.unique(features_kmeans, return_counts=True)
+
+        for u in unique:
+            u = int(u)
+            codebook[u] = counts[u]
+        # save mfcc as a file
+        file_name = file_name.replace('.wav', '.npy')
+        save_file = codebook_path + file_name
+
+        if not os.path.exists(os.path.dirname(save_file)):
+            os.makedirs(os.path.dirname(save_file))
+        np.save(save_file, codebook)
+
+    f.close()
+
+
+def run(self):
+    file = QtCore.QFile(self.filePath)
+    if not file.open(QtCore.QIODevice.WriteOnly):
+        self.saveFileFinished.emit(SAVE_FILE_ERROR, self.urlStr, self.filePath)
+    file.write(self.fileData)
+    file.close()
+    self.saveFileFinished.emit(0, self.urlStr, self.filePath)
+
+
+def saveFile(self, fileName, data):
+    file = QtCore.QFile(fileName)
+    if not file.open(QtCore.QIODevice.WriteOnly):
+        return False
+    file.write(data.readAll())
+    file.close()
+    return True
+
+
+def serialize(self):
+    """Callback to serialize the array."""
+    string_file = io.BytesIO()
+    try:
+        numpy.save(string_file, self.array, allow_pickle=False)
+        serialized = string_file.getvalue()
+    finally:
+        string_file.close()
+    return serialized
+
+
+def train(self, save=False, save_dir=None):
+    train_img_list = glob.glob(self.path_train + "/*")
+    print(train_img_list)
+
+    train_features = []
+
+    for img_file in train_img_list:
+        img = io.imread(img_file)
+        img = color.rgb2lab(img)
+        img_features = self.extract_texton_feature(img, self.fb, self.nb_features)
+        train_features.extend(img_features)
+
+    train_features = np.array(train_features)
+    print(train_features.shape)
+
+    kmeans_cluster = MiniBatchKMeans(n_clusters=self.nb_clusters, verbose=1, max_iter=300)
+    kmeans_cluster.fit(train_features)
+    print(kmeans_cluster.cluster_centers_)
+    print(kmeans_cluster.cluster_centers_.shape)
+
+    self.cluster = kmeans_cluster
+
+    # save kmeans result
+    if save is True:
+        with open(save_dir, 'wb') as f:
+            pickle.dump(self.cluster, f)
+
+    def save(self, event):
+        if not self.filename:
+            self.save_as(event)
+        else:
+            if self.writefile(self.filename):
+                self.set_saved(True)
+                try:
+                    self.editwin.store_file_breaks()
+                except AttributeError:  # may be a PyShell
+                    pass
+        self.text.focus_set()
+        return "break"
+
+
+def writefile(self, filename):
+    self.fixlastline()
+    chars = self.encode(self.text.get("1.0", "end-1c"))
+    if self.eol_convention != "\n":
+        chars = chars.replace("\n", self.eol_convention)
+    try:
+        f = open(filename, "wb")
+        f.write(chars)
+        f.flush()
+        f.close()
+        return True
+    except IOError as msg:
+        tkMessageBox.showerror("I/O Error", str(msg),
+                               master=self.text)
+        return False
+
+
+def save_response_content(response,
+                          destination,
+                          file_size=None,
+                          chunk_size=32768):
+    if file_size is not None:
+        pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
+
+        readable_file_size = sizeof_fmt(file_size)
+    else:
+        pbar = None
+
+    with open(destination, 'wb') as f:
+        downloaded_size = 0
+        for chunk in response.iter_content(chunk_size):
+            downloaded_size += chunk_size
+            if pbar is not None:
+                pbar.update(1)
+                pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} '
+                                     f'/ {readable_file_size}')
+            if chunk:  # filter out keep-alive new chunks
+                f.write(chunk)
+        if pbar is not None:
+            pbar.close()
+
+
+def generateHuman(cloth_list, person_id, sex):
+    haveAcc = 0
+    # load acc
+    hair = open('modeleTxt/hair.txt', 'r').readlines()
+    shoe = open('modeleTxt/shoe.txt', 'r').readlines()
+    pifu = open('modeleTxt/skin.txt', 'r').readlines()
+
+    if not os.path.exists(person_save_Folder):
+        os.makedirs(person_save_Folder)
+
+    if sex > 0:
+        Gender1 = 1000000
+    else:
+        Gender1 = 0
+    #     setting
+    Gender = '%.6f' % (Gender1 / 1000000)
+    Muscle = '%.6f' % (random.randint(0, 1000000) / 1000000)
+    African_1 = random.randint(0, 1000000)
+    African = '%.6f' % (African_1 / 1000000)
+    Asian_1 = random.randint(0, 1000000 - African_1)
+    Asian = '%.6f' % (Asian_1 / 1000000)
+    Caucasian = '%.6f' % ((1000000 - Asian_1 - African_1) / 1000000)
+    if Gender1 > 1000000 / 2:
+        m_height = random.gauss(170, 5.7) / 200
+        while m_height > 1:
+            m_height = random.gauss(170, 5.7) / 200
+        Height = '%.6f' % (m_height)
+    else:
+        m_height = random.gauss(160, 5.2) / 200
+        while m_height > 1:
+            m_height = random.gauss(160, 5.2) / 200
+        Height = '%.6f' % (m_height)
+    BreastSize = '%.6f' % (random.randint(0, 70) / 100)
+    Age = '%.6f' % (random.randint(20, 90) / 100)
+    BreastFirmness = '%.6f' % (random.randint(30, 100) / 100)
+    Weight = '%.6f' % (random.randint(0, 1000000) / 1000000)
+
+    file_name = 'B' + str(person_id)
+    # creating person file
+    f = open(person_save_Folder + file_name + ".mhm", 'a')
+    f.write('# Written by MakeHuman 1.1.1\n')
+    f.write('version v1.1.1\n')
+    f.write('tags ' + file_name + '\n')
+    f.write('camera 0.0 0.0 0.0 0.0 0.0 1.0\n')
+    f.write('modifier macrodetails-universal/Muscle ' + Muscle + '\n')
+    f.write('modifier macrodetails/African ' + African + '\n')
+    f.write('modifier macrodetails-proportions/BodyProportions 0.500000\n')
+    f.write('modifier macrodetails/Gender ' + Gender + '\n')
+    f.write('modifier macrodetails-height/Height ' + Height + '\n')
+    f.write('modifier breast/BreastSize ' + BreastSize + '\n')
+    f.write('modifier macrodetails/Age ' + Age + '\n')
+    f.write('modifier breast/BreastFirmness ' + BreastFirmness + '\n')
+    f.write('modifier macrodetails/Asian ' + Asian + '\n')
+    f.write('modifier macrodetails/Caucasian ' + Caucasian + '\n')
+    f.write('modifier macrodetails-universal/Weight ' + Weight + '\n')
+    f.write('skeleton cmu_mb.mhskel\n')
+    f.write('eyes HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6\n')
+
+    # adding clothes
+    if Gender1 > 1000000 / 2:
+        f.write(hair[random.randint(0, len(hair) - 1)])
+    else:
+        f.write(hair[random.randint(0, len(hair) - 1)])
+    f.write(shoe[random.randint(0, len(shoe) - 1)])
+    for i in range(0, len(cloth_list)):
+        f.write(cloth_list[i] + '\n')
+    f.write('clothesHideFaces True\n')
+    f.write(pifu[random.randint(0, len(pifu) - 1)])
+    f.write('material Braid01 eead6f99-d6c6-4f6b-b6c2-210459d7a62e braid01.mhmat\n')
+    f.write('material HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6 eyes/materials/brown.mhmat\n')
+    f.write('subdivide False\n')
+
+
+def notice_write(request):
+    if request.method == 'POST':
+        form = ContentForm(request.POST)
+        form_file = FileForm(request.POST, request.FILES)
+        if form.is_valid():
+            question = form.save(commit=False)
+            question.author = request.user
+            question.create_date = timezone.now()
+            question.boardname_id = 7
+            question.save()
+            if form_file.is_valid():
+                form_file = FileForm(request.POST, request.FILES)
+                file_save = form_file.save(commit=False)
+                file_save.author = request.user
+                file_save.postcontent = question
+                file_save.boardname_id = 7
+                file_save.file = request.FILES.get("file")
+                file_save.save()
+            return redirect('notice_view')
+    return render(request, 'notice_write.html')
+
+
+def test_write(request):
+    if request.method == 'POST':
+        form = ContentForm(request.POST)
+        form_file = FileForm(request.POST, request.FILES)
+        if form.is_valid():
+            question = form.save(commit=False)
+            question.author = request.user
+            question.create_date = timezone.now()
+            question.boardname_id = 14
+            question.save()
+            if form_file.is_valid():
+                form_file = FileForm(request.POST, request.FILES)
+                file_save = form_file.save(commit=False)
+                file_save.author = request.user
+                file_save.postcontent = question
+                file_save.boardname_id = 14
+                file_save.file = request.FILES.get("file")
+                file_save.save()
+            return redirect('test_list')
+    return render(request, 'test_write.html')
+
+
+def down_file(url, name, path):
+    if os.path.exists(path):
+        return
+
+    print("开始下载:" + name + ".mp3")
+    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+               "Accept-Encoding": "gzip, deflate, br",
+               "Accept-Language": "zh-CN,zh;q=0.9",
+               "Upgrade-Insecure-Requests": "1",
+               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
+
+    count = 0
+    while count < 3:
+        try:
+
+            r = requests.get(url, headers=headers, stream=True, timeout=60)
+            # print(r.status_code)
+            if (r.status_code == 200):
+                with open(path, "wb+") as f:
+                    for chunk in r.iter_content(1024):
+                        f.write(chunk)
+                print("完成下载:" + name + ".mp3")
+                break
+        except Exception as e:
+            print(e)
+            print("下载出错:" + name + ".mp3,3秒后重试")
+            if os.path.exists(path):
+                os.remove(path)
+
+            time.sleep(3)
+        count += 1
+
+    pass
+
+
+def save_as():
+    global file_name
+    content = content_text.get(1.0, 'end')
+    with open(file_name, 'w') as save:
+        save.write(content)
+
+
+def export_save(data_player, data_kick, guild_id, save_name=""):
+    if save_name: save_name = "_" + save_name
+    print(" - Partie enregistrée -")
+
+    with open(f"saves/save{save_name}.json", "w") as file:
+        file.write(json.dumps(
+            {
+                "players": [data_player[player_id].export() for player_id in data_player],
+                "kicks": data_kick,
+                "guild_id": guild_id
+            }, indent=4))
+
+
+def conv(heic_path, save_dir, filetype, quality):
+    # 保存先のディレクトリとファイル名
+    extension = "." + filetype
+    save_path = save_dir / filetype / pathlib.Path(*heic_path.parts[1:]).with_suffix(extension)
+    # フォルダ作成
+    save_path.parent.mkdir(parents=True, exist_ok=True)
+    # HEICファイルpyheifで読み込み
+    heif_file = pyheif.read(heic_path)
+    # 読み込んだファイルの中身をdata変数へ
+    data = Image.frombytes(
+        heif_file.mode,
+        heif_file.size,
+        heif_file.data,
+        "raw",
+        heif_file.mode,
+        heif_file.stride,
+    )
+    # JPEGで保存
+    data.save(save_path, quality=quality)
+    print("保存:", save_path)
+
+
+def parsing_sravni_ru(soup):
+    names = soup.find_all('span', class_='_106rrj0')  # scraping names
+
+    # scraping age childrens
+    age_divs = soup.find_all('div', {'style': 'grid-area:firstCell-1', 'class': '_pjql8'})
+    ages = []
+    for i in age_divs:
+        age_span = i.find('span')
+        ages.append(age_span)
+
+    # scraping course duration
+    duration_divs = soup.find_all('div', {'style': 'grid-area:secondCell-1', 'class': '_pjql8'})
+    durations = []
+    for i in duration_divs:
+        duration_span = i.find('span')
+        durations.append(duration_span)
+
+    # scraping price
+    prices = soup.find_all('span', class_='_e9qrci _k8dl2y')
+
+    items = []
+    for (n, l, i, p) in zip(names, ages, durations, prices):
+        name = n.text.strip()
+        age = l.text.strip()
+        duration = i.text.strip()
+        price = p.text.strip().replace('\xa0', '')
+        items.append(
+            {
+                'name': name,
+                'age': age,
+                'duration': duration,
+                'price': price,
+            }
+        )
+
+    # save json file
+    with open("./data/items.json", "w", encoding="utf-8") as f:
+        json.dump(items, f, indent=4, ensure_ascii=False)
+
+    with open("./data/items.csv", 'a', encoding="utf-8") as file:
+        for i in items:
+            writer = csv.writer(file)
+            writer.writerow(
+                (
+                    i['name'],
+                    i['age'],
+                    i['duration'],
+                    i['price']
+                )
+            )
+
+
+def save_to_file(self, path):
+    with open(path, "w") as f:
+        f.write(self.cert_pem())
+        f.write(self.key_pem())
+
+
+def save_cert_to_file(self, path):
+    with open(path, "w") as f:
+        f.write(self.cert_pem())
+
+
+def _save_large_file(self, os_path, content, format):
+    """Save content of a generic file."""
+    if format not in {'text', 'base64'}:
+        raise web.HTTPError(
+            400,
+            "Must specify format of file contents as 'text' or 'base64'",
+        )
+    try:
+        if format == 'text':
+            bcontent = content.encode('utf8')
+        else:
+            b64_bytes = content.encode('ascii')
+            bcontent = base64.b64decode(b64_bytes)
+    except Exception as e:
+        raise web.HTTPError(
+            400, u'Encoding error saving %s: %s' % (os_path, e)
+        )
+
+    with self.perm_to_403(os_path):
+        if os.path.islink(os_path):
+            os_path = os.path.join(os.path.dirname(os_path), os.readlink(os_path))
+        with io.open(os_path, 'ab') as f:
+            f.write(bcontent)
+
+
+def get_unzip_hdfs_file(hdfs_file_url, save_dir):
+    # 判断保存路径是否存在,不存在的话创建此目录
+    if os.path.isdir(save_dir):
+        pass
+    else:
+        os.mkdir(save_dir)
+
+    # hdfs文件名
+    filename = hdfs_file_url.split("/").pop()
+
+    # 保存到本地的文件名
+    save_filename = ""
+
+    # 判断是否为压缩文件
+    if filename.endswith(".gz"):
+        save_filename = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + ".gz"
+    else:
+        save_filename = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
+
+    # 判断保存路径最后是否有/
+    if save_dir.endswith("/"):
+        save_file = save_dir + save_filename
+    else:
+        save_file = save_dir + "/" + save_filename
+
+    # 生成下载hdfs文件的命令
+    hadoop_get = 'hadoop fs -get %s %s' % (hdfs_file_url, save_file)
+    logger.info("download hdfs file command: " + hadoop_get)
+    # shell执行生成的hdfs命令
+    try:
+        os.system(hadoop_get)
+    except Exception as e:
+        logger.error(e)
+        return False
+
+    # 判断下载的hdfs文件是否为压缩文件
+    if save_file.endswith(".gz"):
+
+        # 对此压缩文件进行压缩
+        try:
+            # 解压后的文件名
+            f_name = save_file.replace(".gz", "")
+            # 解压缩
+            g_file = gzip.GzipFile(save_file)
+            # 写入文件
+            open(f_name, "w+").write(g_file.read())
+            # 关闭文件流
+            g_file.close()
+
+            return f_name
+        except Exception as e:
+            logger.error(e)
+            return False
+    else:
+        return save_file
+
+
+"""
+根据HDFS文件目录下载此目录下所有的文件
+参数说明:
+hdfs_dir:HDFS文件目录
+save_dir:要保存的目录
+返回结果说明:执行成功返回True,执行失败返回False
+"""
+
+
+def get_unzip_hdfs_file_from_dir(hdfs_dir, save_dir):
+    # 命令:获取hdfs目录下的文件
+    hadoop_ls = "hadoop fs -ls %s | grep -i '^-'" % hdfs_dir
+
+    # 解压后的文件列表
+    save_file_list = []
+    # 执行shell命令
+    hdfs_result = exec_sh(hadoop_ls, None)
+
+    # 获取命令执行输出
+    hdfs_stdout = hdfs_result["stdout"]
+    # print("hdfs_stdout = " + hdfs_stdout)
+
+    # 要下载的HDFS文件列表
+    hdfs_list = []
+
+    # 判断是否有输出
+    if hdfs_stdout:
+        # 以行分割, 一行是一个文件的信息
+        hdfs_lines = hdfs_stdout.split("\n")
+
+        # 对每一行进行处理
+        for line in hdfs_lines:
+
+            # 以空白字符为分割符获取hdfs文件名
+            line_list = re.split("\s+", line)
+
+            # -rw-r--r--   2 caoweidong supergroup      42815 2017-01-23 14:20 /user/000000_0.gz
+            if line_list.__len__() == 8:
+                # print("line_list[7] = " + line_list[7])
+
+                # HDFS文件加入下载列表
+                hdfs_list.append(line_list[7])
+            else:
+                pass
+        # 下载文件
+        for file in hdfs_list:
+            save_filename = get_unzip_hdfs_file(file, save_dir)
+            save_file_list.append(save_filename)
+        return save_file_list
+    else:
+        return False
+
+
+def save_game(self):
+    save_file = open("saves/main_save.xml", "w+")
+
+    level = self.save_level()
+    self.tree.append(level)
+
+    team = self.save_team()
+    self.tree.append(team)
+
+    # Store XML tree in file
+    save_file.write(etree.tostring(self.tree, pretty_print=True, encoding="unicode"))
+
+    save_file.close()
+
+    def save_upload_file(
+            self,
+            file: UploadFile,
+            save_dir_path: pathlib.Path,
+            job_id: str,
+            dt_string: str,
+    ) -> pathlib.Path:
+        """Save `file` under `save_dir_path`.
+        Args:
+            file (UploadFile): A file want to save.
+            save_dir_path (pathlib.Path): A path to directory where file will be saved.
+            job_id (str): A job id. This will used part of filename.
+            dt_string (str): A datetime info. This will used part of filename.
+        Return:
+            pathlib.Path: A path where file is saved.
+        """
+        if not save_dir_path.exists():
+            save_dir_path.mkdir(parents=True, exist_ok=True)
+
+        save_path: Final = save_dir_path / f"{dt_string}_{job_id}_{file.filename}"
+
+        try:
+            with save_path.open("wb") as f:
+                shutil.copyfileobj(file.file, f)
+        finally:
+            file.file.close()
+
+        return save_path
+
+
+def save_output(output, list_to_save):
+    if output:
+        with open(output, "w") as f:
+            for item in list_to_save:
+                f.write("%s\n" % item)
+        print(f"Output file: {output}")
+
+
+def _saveTestWavFile(self, filename, wav_data):
+    with open(filename, "wb") as f:
+        f.write(wav_data)
+
+
+def _save_large_file(self, os_path, content, format):
+    """Save content of a generic file."""
+    if format not in {'text', 'base64'}:
+        raise web.HTTPError(
+            400,
+            "Must specify format of file contents as 'text' or 'base64'",
+        )
+    try:
+        if format == 'text':
+            bcontent = content.encode('utf8')
+        else:
+            b64_bytes = content.encode('ascii')
+            bcontent = base64.b64decode(b64_bytes)
+    except Exception as e:
+        raise web.HTTPError(
+            400, u'Encoding error saving %s: %s' % (os_path, e)
+        )
+
+    with self.perm_to_403(os_path):
+        if os.path.islink(os_path):
+            os_path = os.path.join(os.path.dirname(os_path), os.readlink(os_path))
+        with io.open(os_path, 'ab') as f:
+            f.write(bcontent)
+
+
+def _post_save_script(model, os_path, contents_manager, **kwargs):
+    """convert notebooks to Python script after save with nbconvert
+    replaces `jupyter notebook --script`
+    """
+    from nbconvert.exporters.script import ScriptExporter
+    warnings.warn("`_post_save_script` is deprecated and will be removed in Notebook 5.0", DeprecationWarning)
+
+    if model['type'] != 'notebook':
+        return
+
+    global _script_exporter
+    if _script_exporter is None:
+        _script_exporter = ScriptExporter(parent=contents_manager)
+    log = contents_manager.log
+
+    base, ext = os.path.splitext(os_path)
+    script, resources = _script_exporter.from_filename(os_path)
+    script_fname = base + resources.get('output_extension', '.txt')
+    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
+    with io.open(script_fname, 'w', encoding='utf-8') as f:
+        f.write(script)
+
+
+def _save_data(filename, data):
+    """
+    Save formatted skeleton data to a pickle file
+    """
+    if filename[-2:] == ".p":
+        filename = filename
+    else:
+        filename = str(filename + ".p")
+
+    with open(filename, 'wb') as fp:
+        pickle.dump(data, fp, protocol=pickle.HIGHEST_PROTOCOL)
+    print("Saved data to file: " + filename)
+
+
+def download_unknowns(url: str) -> None:
+    """."""
+    page_content: bytes = get_none_soup(url)
+    page_string: bytes = page_content[0:100]
+    """parse section of page bytes and use as name. If unknown encoding
+    convert to number string (exclude first few bytes that state filetype) """
+    try:
+        page_unicode = page_string.decode("ISO-8859-1").replace(R'%', '_')
+        page_parsed = [char for char in page_unicode if char.isalnum() or char == '_']
+        unknown_file_name = "".join(page_parsed)[10:30]
+    except UnicodeDecodeError:
+        try:
+            page_unicode = page_string.decode('utf-8').replace(R'%', '_')
+            page_parsed = [char for char in page_unicode if char.isalnum() or char == '_']
+            unknown_file_name = "".join(page_parsed)[10:30]
+        except UnicodeDecodeError:
+            unknown_file_name = "unk_"
+            for char in page_content[10:30]:
+                if char != b'\\':
+                    unknown_file_name += str(char)
+    print(unknown_file_name)
+    """check beginning of page bytes for a filetype"""
+    if b'%PDF' in page_string:  # ;
+        extension = '.pdf'
+    else:
+        extension = '.unk.txt'
+
+    with open(save_file_dir + '/' + unknown_file_name + extension, 'wb') as file:
+        file.write(page_content)  # ; print(save_file_dir)
+
+
+def download_images(start_url: str, filetypes: List[str]) -> None:
+    """.."""
+    base_url = get_base_url(start_url)
+    # print(start_url)
+    soup = get_soup(start_url)  # ;print(soup)
+    if soup is not None:
+        for index, image in enumerate(soup.select('img')):  # print(image)
+            # image_raw = str(image)
+            src_raw = str(image.get('src'))  # print(image.attrs['src'])
+            if src_raw.startswith('http'):
+                image_url = src_raw
+            elif src_raw.startswith('/'):
+                image_url = base_url + src_raw
+            else:
+                image_url = src_raw
+            # print(image_url)
+            for image_type in filter(lambda x: x in src_raw, filetypes):  # print(image)
+                image_response = requests.get(image_url, stream=True)
+                if image_response.status_code == 200:
+                    image_name = re.sub(r'.*/', '', src_raw).replace(R'.', '_')
+                    # print(image_name, index)
+                    fp: BinaryIO = open(save_image_dir + '/' + image_name + str(index) + image_type, 'wb')
+                    fp.write(image_response.content)
+                    fp.close()
+                    # i = Image.open(BytesIO(image_response.content))
+                    # i.save(image_name)
+
+
+def _unicode_save(self, temp_file):
+    im = pygame.Surface((10, 10), 0, 32)
+    try:
+        with open(temp_file, "w") as f:
+            pass
+        os.remove(temp_file)
+    except IOError:
+        raise unittest.SkipTest("the path cannot be opened")
+
+    self.assertFalse(os.path.exists(temp_file))
+
+    try:
+        imageext.save_extended(im, temp_file)
+
+        self.assertGreater(os.path.getsize(temp_file), 10)
+    finally:
+        try:
+            os.remove(temp_file)
+        except EnvironmentError:
+            pass

+ 0 - 3
Target/File/checkpoint_4.py

@@ -1,3 +0,0 @@
-def has_checkpoint(self):
-        save_file = os.path.join(self.save_dir, "last_checkpoint")
-        return os.path.exists(save_file)

+ 0 - 2
Target/File/checkpoint_7.py

@@ -1,2 +0,0 @@
-def _load_file(self, f):
-        return torch.load(f, map_location=torch.device("cpu"))

+ 46 - 0
Target/File/utils_10.py

@@ -0,0 +1,46 @@
+def extract_codebook(dataset='train'):
+    f = open(data_path + dataset + '_list.txt', 'r')
+    i = 0
+    for file_name in f:
+        i = i + 1
+        if not (i % 10):
+            print(i)
+        # load audio file
+        file_name = file_name.rstrip('\n')
+        file_path = data_path + file_name
+        # #print file_path
+        y0, sr = librosa.load(file_path, sr=22050)
+        # we use first 1 second
+        half = len(y0) / 4
+        y = y0[:round(half)]
+        # STFT
+        S_full, phase = librosa.magphase(librosa.stft(y, n_fft=1024, window='hann', hop_length=256, win_length=1024))
+        n = len(y)
+
+        # Check the shape of matrix: row must corresponds to the example index !!!
+        X = S_full.T
+
+        # codebook by using K-Means Clustering
+        K = 20
+        kmeans = KMeans(n_clusters=K, random_state=0).fit(X)
+        features_kmeans = np.zeros(X.shape[0])
+        # for each sample, summarize feature!!!
+        codebook = np.zeros(K)
+        for sample in range(X.shape[0]):
+            features_kmeans[sample] = kmeans.labels_[sample]
+
+        # codebook histogram!
+        unique, counts = np.unique(features_kmeans, return_counts=True)
+
+        for u in unique:
+            u = int(u)
+            codebook[u] = counts[u]
+        # save mfcc as a file
+        file_name = file_name.replace('.wav', '.npy')
+        save_file = codebook_path + file_name
+
+        if not os.path.exists(os.path.dirname(save_file)):
+            os.makedirs(os.path.dirname(save_file))
+        np.save(save_file, codebook)
+
+    f.close()

+ 7 - 0
Target/File/utils_11.py

@@ -0,0 +1,7 @@
+def run(self):
+    file = QtCore.QFile(self.filePath)
+    if not file.open(QtCore.QIODevice.WriteOnly):
+        self.saveFileFinished.emit(SAVE_FILE_ERROR, self.urlStr, self.filePath)
+    file.write(self.fileData)
+    file.close()
+    self.saveFileFinished.emit(0, self.urlStr, self.filePath)

+ 7 - 0
Target/File/utils_12.py

@@ -0,0 +1,7 @@
+def saveFile(self, fileName, data):
+    file = QtCore.QFile(fileName)
+    if not file.open(QtCore.QIODevice.WriteOnly):
+        return False
+    file.write(data.readAll())
+    file.close()
+    return True

+ 9 - 0
Target/File/utils_13.py

@@ -0,0 +1,9 @@
+def serialize(self):
+    """Callback to serialize the array."""
+    string_file = io.BytesIO()
+    try:
+        numpy.save(string_file, self.array, allow_pickle=False)
+        serialized = string_file.getvalue()
+    finally:
+        string_file.close()
+    return serialized

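A minimal round-trip sketch for the `serialize` callback above; the `restore` helper and the sample array are illustrative, assuming the usual `numpy`/`io` imports used in File/utils.py:

import io
import numpy

def restore(serialized):
    # numpy.load reads back the .npy bytes produced by numpy.save
    return numpy.load(io.BytesIO(serialized), allow_pickle=False)

array = numpy.arange(6).reshape(2, 3)
buf = io.BytesIO()
numpy.save(buf, array, allow_pickle=False)   # same call serialize() makes
assert (restore(buf.getvalue()) == array).all()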
+ 39 - 0
Target/File/utils_14.py

@@ -0,0 +1,39 @@
+def train(self, save=False, save_dir=None):
+    train_img_list = glob.glob(self.path_train + "/*")
+    print(train_img_list)
+
+    train_features = []
+
+    for img_file in train_img_list:
+        img = io.imread(img_file)
+        img = color.rgb2lab(img)
+        img_features = self.extract_texton_feature(img, self.fb, self.nb_features)
+        train_features.extend(img_features)
+
+    train_features = np.array(train_features)
+    print(train_features.shape)
+
+    kmeans_cluster = MiniBatchKMeans(n_clusters=self.nb_clusters, verbose=1, max_iter=300)
+    kmeans_cluster.fit(train_features)
+    print(kmeans_cluster.cluster_centers_)
+    print(kmeans_cluster.cluster_centers_.shape)
+
+    self.cluster = kmeans_cluster
+
+    # save kmeans result
+    if save is True:
+        with open(save_dir, 'wb') as f:
+            pickle.dump(self.cluster, f)
+
+    def save(self, event):
+        if not self.filename:
+            self.save_as(event)
+        else:
+            if self.writefile(self.filename):
+                self.set_saved(True)
+                try:
+                    self.editwin.store_file_breaks()
+                except AttributeError:  # may be a PyShell
+                    pass
+        self.text.focus_set()
+        return "break"

+ 15 - 0
Target/File/utils_15.py

@@ -0,0 +1,15 @@
+def writefile(self, filename):
+    self.fixlastline()
+    chars = self.encode(self.text.get("1.0", "end-1c"))
+    if self.eol_convention != "\n":
+        chars = chars.replace("\n", self.eol_convention)
+    try:
+        f = open(filename, "wb")
+        f.write(chars)
+        f.flush()
+        f.close()
+        return True
+    except IOError as msg:
+        tkMessageBox.showerror("I/O Error", str(msg),
+                               master=self.text)
+        return False

+ 23 - 0
Target/File/utils_16.py

@@ -0,0 +1,23 @@
+def save_response_content(response,
+                          destination,
+                          file_size=None,
+                          chunk_size=32768):
+    if file_size is not None:
+        pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
+
+        readable_file_size = sizeof_fmt(file_size)
+    else:
+        pbar = None
+
+    with open(destination, 'wb') as f:
+        downloaded_size = 0
+        for chunk in response.iter_content(chunk_size):
+            downloaded_size += chunk_size
+            if pbar is not None:
+                pbar.update(1)
+                pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} '
+                                     f'/ {readable_file_size}')
+            if chunk:  # filter out keep-alive new chunks
+                f.write(chunk)
+        if pbar is not None:
+            pbar.close()

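A usage sketch for `save_response_content`; the URL and destination file name are hypothetical, and `tqdm`, `math`, and the `sizeof_fmt` helper are assumed to be available as in File/utils.py:

import requests

url = "https://example.com/archive.zip"  # hypothetical download URL
response = requests.get(url, stream=True)
# Content-Length may be absent; pass None to skip the progress bar
file_size = int(response.headers.get("Content-Length", 0)) or None
save_response_content(response, "archive.zip", file_size=file_size)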
+ 71 - 0
Target/File/utils_17.py

@@ -0,0 +1,71 @@
+def generateHuman(cloth_list, person_id, sex):
+    haveAcc = 0
+    # load acc
+    hair = open('modeleTxt/hair.txt', 'r').readlines()
+    shoe = open('modeleTxt/shoe.txt', 'r').readlines()
+    pifu = open('modeleTxt/skin.txt', 'r').readlines()
+
+    if not os.path.exists(person_save_Folder):
+        os.makedirs(person_save_Folder)
+
+    if sex > 0:
+        Gender1 = 1000000
+    else:
+        Gender1 = 0
+    #     setting
+    Gender = '%.6f' % (Gender1 / 1000000)
+    Muscle = '%.6f' % (random.randint(0, 1000000) / 1000000)
+    African_1 = random.randint(0, 1000000)
+    African = '%.6f' % (African_1 / 1000000)
+    Asian_1 = random.randint(0, 1000000 - African_1)
+    Asian = '%.6f' % (Asian_1 / 1000000)
+    Caucasian = '%.6f' % ((1000000 - Asian_1 - African_1) / 1000000)
+    if Gender1 > 1000000 / 2:
+        m_height = random.gauss(170, 5.7) / 200
+        while m_height > 1:
+            m_height = random.gauss(170, 5.7) / 200
+        Height = '%.6f' % (m_height)
+    else:
+        m_height = random.gauss(160, 5.2) / 200
+        while m_height > 1:
+            m_height = random.gauss(160, 5.2) / 200
+        Height = '%.6f' % (m_height)
+    BreastSize = '%.6f' % (random.randint(0, 70) / 100)
+    Age = '%.6f' % (random.randint(20, 90) / 100)
+    BreastFirmness = '%.6f' % (random.randint(30, 100) / 100)
+    Weight = '%.6f' % (random.randint(0, 1000000) / 1000000)
+
+    file_name = 'B' + str(person_id)
+    # creating person file
+    f = open(person_save_Folder + file_name + ".mhm", 'a')
+    f.write('# Written by MakeHuman 1.1.1\n')
+    f.write('version v1.1.1\n')
+    f.write('tags ' + file_name + '\n')
+    f.write('camera 0.0 0.0 0.0 0.0 0.0 1.0\n')
+    f.write('modifier macrodetails-universal/Muscle ' + Muscle + '\n')
+    f.write('modifier macrodetails/African ' + African + '\n')
+    f.write('modifier macrodetails-proportions/BodyProportions 0.500000\n')
+    f.write('modifier macrodetails/Gender ' + Gender + '\n')
+    f.write('modifier macrodetails-height/Height ' + Height + '\n')
+    f.write('modifier breast/BreastSize ' + BreastSize + '\n')
+    f.write('modifier macrodetails/Age ' + Age + '\n')
+    f.write('modifier breast/BreastFirmness ' + BreastFirmness + '\n')
+    f.write('modifier macrodetails/Asian ' + Asian + '\n')
+    f.write('modifier macrodetails/Caucasian ' + Caucasian + '\n')
+    f.write('modifier macrodetails-universal/Weight ' + Weight + '\n')
+    f.write('skeleton cmu_mb.mhskel\n')
+    f.write('eyes HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6\n')
+
+    # adding clothes
+    if Gender1 > 1000000 / 2:
+        f.write(hair[random.randint(0, len(hair) - 1)])
+    else:
+        f.write(hair[random.randint(0, len(hair) - 1)])
+    f.write(shoe[random.randint(0, len(shoe) - 1)])
+    for i in range(0, len(cloth_list)):
+        f.write(cloth_list[i] + '\n')
+    f.write('clothesHideFaces True\n')
+    f.write(pifu[random.randint(0, len(pifu) - 1)])
+    f.write('material Braid01 eead6f99-d6c6-4f6b-b6c2-210459d7a62e braid01.mhmat\n')
+    f.write('material HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6 eyes/materials/brown.mhmat\n')
+    f.write('subdivide False\n')

+ 20 - 0
Target/File/utils_18.py

@@ -0,0 +1,20 @@
+def notice_write(request):
+    if request.method == 'POST':
+        form = ContentForm(request.POST)
+        form_file = FileForm(request.POST, request.FILES)
+        if form.is_valid():
+            question = form.save(commit=False)
+            question.author = request.user
+            question.create_date = timezone.now()
+            question.boardname_id = 7
+            question.save()
+            if form_file.is_valid():
+                form_file = FileForm(request.POST, request.FILES)
+                file_save = form_file.save(commit=False)
+                file_save.author = request.user
+                file_save.postcontent = question
+                file_save.boardname_id = 7
+                file_save.file = request.FILES.get("file")
+                file_save.save()
+            return redirect('notice_view')
+    return render(request, 'notice_write.html')

+ 20 - 0
Target/File/utils_19.py

@@ -0,0 +1,20 @@
+def test_write(request):
+    if request.method == 'POST':
+        form = ContentForm(request.POST)
+        form_file = FileForm(request.POST, request.FILES)
+        if form.is_valid():
+            question = form.save(commit=False)
+            question.author = request.user
+            question.create_date = timezone.now()
+            question.boardname_id = 14
+            question.save()
+            if form_file.is_valid():
+                form_file = FileForm(request.POST, request.FILES)
+                file_save = form_file.save(commit=False)
+                file_save.author = request.user
+                file_save.postcontent = question
+                file_save.boardname_id = 14
+                file_save.file = request.FILES.get("file")
+                file_save.save()
+            return redirect('test_list')
+    return render(request, 'test_write.html')

+ 33 - 0
Target/File/utils_20.py

@@ -0,0 +1,33 @@
+def down_file(url, name, path):
+    if os.path.exists(path):
+        return
+
+    print("开始下载:" + name + ".mp3")
+    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+               "Accept-Encoding": "gzip, deflate, br",
+               "Accept-Language": "zh-CN,zh;q=0.9",
+               "Upgrade-Insecure-Requests": "1",
+               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
+
+    count = 0
+    while count < 3:
+        try:
+
+            r = requests.get(url, headers=headers, stream=True, timeout=60)
+            # print(r.status_code)
+            if (r.status_code == 200):
+                with open(path, "wb+") as f:
+                    for chunk in r.iter_content(1024):
+                        f.write(chunk)
+                print("完成下载:" + name + ".mp3")
+                break
+        except Exception as e:
+            print(e)
+            print("下载出错:" + name + ".mp3,3秒后重试")
+            if os.path.exists(path):
+                os.remove(path)
+
+            time.sleep(3)
+        count += 1
+
+    pass

+ 5 - 0
Target/File/utils_21.py

@@ -0,0 +1,5 @@
+def save_as():
+    global file_name
+    content = content_text.get(1.0, 'end')
+    with open(file_name, 'w') as save:
+        save.write(content)

+ 11 - 0
Target/File/utils_22.py

@@ -0,0 +1,11 @@
+def export_save(data_player, data_kick, guild_id, save_name=""):
+    if save_name: save_name = "_" + save_name
+    print(" - Partie enregistrée -")
+
+    with open(f"saves/save{save_name}.json", "w") as file:
+        file.write(json.dumps(
+            {
+                "players": [data_player[player_id].export() for player_id in data_player],
+                "kicks": data_kick,
+                "guild_id": guild_id
+            }, indent=4))

+ 20 - 0
Target/File/utils_23.py

@@ -0,0 +1,20 @@
+def conv(heic_path, save_dir, filetype, quality):
+    # 保存先のディレクトリとファイル名
+    extension = "." + filetype
+    save_path = save_dir / filetype / pathlib.Path(*heic_path.parts[1:]).with_suffix(extension)
+    # フォルダ作成
+    save_path.parent.mkdir(parents=True, exist_ok=True)
+    # HEICファイルpyheifで読み込み
+    heif_file = pyheif.read(heic_path)
+    # 読み込んだファイルの中身をdata変数へ
+    data = Image.frombytes(
+        heif_file.mode,
+        heif_file.size,
+        heif_file.data,
+        "raw",
+        heif_file.mode,
+        heif_file.stride,
+    )
+    # JPEGで保存
+    data.save(save_path, quality=quality)
+    print("保存:", save_path)

+ 50 - 0
Target/File/utils_24.py

@@ -0,0 +1,50 @@
+def parsing_sravni_ru(soup):
+    names = soup.find_all('span', class_='_106rrj0')  # scraping names
+
+    # scraping age childrens
+    age_divs = soup.find_all('div', {'style': 'grid-area:firstCell-1', 'class': '_pjql8'})
+    ages = []
+    for i in age_divs:
+        age_span = i.find('span')
+        ages.append(age_span)
+
+    # scraping course duration
+    duration_divs = soup.find_all('div', {'style': 'grid-area:secondCell-1', 'class': '_pjql8'})
+    durations = []
+    for i in duration_divs:
+        duration_span = i.find('span')
+        durations.append(duration_span)
+
+    # scraping price
+    prices = soup.find_all('span', class_='_e9qrci _k8dl2y')
+
+    items = []
+    for (n, l, i, p) in zip(names, ages, durations, prices):
+        name = n.text.strip()
+        age = l.text.strip()
+        duration = i.text.strip()
+        price = p.text.strip().replace('\xa0', '')
+        items.append(
+            {
+                'name': name,
+                'age': age,
+                'duration': duration,
+                'price': price,
+            }
+        )
+
+    # save json file
+    with open("./data/items.json", "w", encoding="utf-8") as f:
+        json.dump(items, f, indent=4, ensure_ascii=False)
+
+    with open("./data/items.csv", 'a', encoding="utf-8") as file:
+        for i in items:
+            writer = csv.writer(file)
+            writer.writerow(
+                (
+                    i['name'],
+                    i['age'],
+                    i['duration'],
+                    i['price']
+                )
+            )

+ 4 - 0
Target/File/utils_25.py

@@ -0,0 +1,4 @@
+def save_to_file(self, path):
+    with open(path, "w") as f:
+        f.write(self.cert_pem())
+        f.write(self.key_pem())

+ 3 - 0
Target/File/utils_26.py

@@ -0,0 +1,3 @@
+def save_cert_to_file(self, path):
+    with open(path, "w") as f:
+        f.write(self.cert_pem())

+ 23 - 0
Target/File/utils_27.py

@@ -0,0 +1,23 @@
+def _save_large_file(self, os_path, content, format):
+    """Save content of a generic file."""
+    if format not in {'text', 'base64'}:
+        raise web.HTTPError(
+            400,
+            "Must specify format of file contents as 'text' or 'base64'",
+        )
+    try:
+        if format == 'text':
+            bcontent = content.encode('utf8')
+        else:
+            b64_bytes = content.encode('ascii')
+            bcontent = base64.b64decode(b64_bytes)
+    except Exception as e:
+        raise web.HTTPError(
+            400, u'Encoding error saving %s: %s' % (os_path, e)
+        )
+
+    with self.perm_to_403(os_path):
+        if os.path.islink(os_path):
+            os_path = os.path.join(os.path.dirname(os_path), os.readlink(os_path))
+        with io.open(os_path, 'ab') as f:
+            f.write(bcontent)

+ 55 - 0
Target/File/utils_28.py

@@ -0,0 +1,55 @@
+def get_unzip_hdfs_file(hdfs_file_url, save_dir):
+    # 判断保存路径是否存在,不存在的话创建此目录
+    if os.path.isdir(save_dir):
+        pass
+    else:
+        os.mkdir(save_dir)
+
+    # hdfs文件名
+    filename = hdfs_file_url.split("/").pop()
+
+    # 保存到本地的文件名
+    save_filename = ""
+
+    # 判断是否为压缩文件
+    if filename.endswith(".gz"):
+        save_filename = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + ".gz"
+    else:
+        save_filename = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
+
+    # 判断保存路径最后是否有/
+    if save_dir.endswith("/"):
+        save_file = save_dir + save_filename
+    else:
+        save_file = save_dir + "/" + save_filename
+
+    # 生成下载hdfs文件的命令
+    hadoop_get = 'hadoop fs -get %s %s' % (hdfs_file_url, save_file)
+    logger.info("download hdfs file command: " + hadoop_get)
+    # shell执行生成的hdfs命令
+    try:
+        os.system(hadoop_get)
+    except Exception as e:
+        logger.error(e)
+        return False
+
+    # 判断下载的hdfs文件是否为压缩文件
+    if save_file.endswith(".gz"):
+
+        # 对此压缩文件进行压缩
+        try:
+            # 解压后的文件名
+            f_name = save_file.replace(".gz", "")
+            # 解压缩
+            g_file = gzip.GzipFile(save_file)
+            # 写入文件
+            open(f_name, "w+").write(g_file.read())
+            # 关闭文件流
+            g_file.close()
+
+            return f_name
+        except Exception as e:
+            logger.error(e)
+            return False
+    else:
+        return save_file

+ 42 - 0
Target/File/utils_29.py

@@ -0,0 +1,42 @@
+def get_unzip_hdfs_file_from_dir(hdfs_dir, save_dir):
+    # 命令:获取hdfs目录下的文件
+    hadoop_ls = "hadoop fs -ls %s | grep -i '^-'" % hdfs_dir
+
+    # 解压后的文件列表
+    save_file_list = []
+    # 执行shell命令
+    hdfs_result = exec_sh(hadoop_ls, None)
+
+    # 获取命令执行输出
+    hdfs_stdout = hdfs_result["stdout"]
+    # print("hdfs_stdout = " + hdfs_stdout)
+
+    # 要下载的HDFS文件列表
+    hdfs_list = []
+
+    # 判断是否有输出
+    if hdfs_stdout:
+        # 以行分割, 一行是一个文件的信息
+        hdfs_lines = hdfs_stdout.split("\n")
+
+        # 对每一行进行处理
+        for line in hdfs_lines:
+
+            # 以空白字符为分割符获取hdfs文件名
+            line_list = re.split("\s+", line)
+
+            # -rw-r--r--   2 caoweidong supergroup      42815 2017-01-23 14:20 /user/000000_0.gz
+            if line_list.__len__() == 8:
+                # print("line_list[7] = " + line_list[7])
+
+                # HDFS文件加入下载列表
+                hdfs_list.append(line_list[7])
+            else:
+                pass
+        # 下载文件
+        for file in hdfs_list:
+            save_filename = get_unzip_hdfs_file(file, save_dir)
+            save_file_list.append(save_filename)
+        return save_file_list
+    else:
+        return False

+ 3 - 0
Target/File/utils_3.py

@@ -0,0 +1,3 @@
+def pickle_dump(item, out_file):
+    with open(out_file, "wb") as opened_file:
+        pickle.dump(item, opened_file)

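A round-trip sketch for `pickle_dump`; the matching `pickle_load` helper and the file name are illustrative, not part of the commit's modules:

import pickle

def pickle_load(in_file):
    # counterpart of pickle_dump: read one pickled object back
    with open(in_file, "rb") as opened_file:
        return pickle.load(opened_file)

state = {"epoch": 3, "loss": 0.12}
pickle_dump(state, "state.pkl")
assert pickle_load("state.pkl") == state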
+ 42 - 0
Target/File/utils_30.py

@@ -0,0 +1,42 @@
+def save_game(self):
+    save_file = open("saves/main_save.xml", "w+")
+
+    level = self.save_level()
+    self.tree.append(level)
+
+    team = self.save_team()
+    self.tree.append(team)
+
+    # Store XML tree in file
+    save_file.write(etree.tostring(self.tree, pretty_print=True, encoding="unicode"))
+
+    save_file.close()
+
+    def save_upload_file(
+            self,
+            file: UploadFile,
+            save_dir_path: pathlib.Path,
+            job_id: str,
+            dt_string: str,
+    ) -> pathlib.Path:
+        """Save `file` under `save_dir_path`.
+        Args:
+            file (UploadFile): The file to save.
+            save_dir_path (pathlib.Path): Directory where the file will be saved.
+            job_id (str): The job id, used as part of the filename.
+            dt_string (str): A datetime string, used as part of the filename.
+        Returns:
+            pathlib.Path: The path where the file was saved.
+        """
+        if not save_dir_path.exists():
+            save_dir_path.mkdir(parents=True, exist_ok=True)
+
+        save_path: Final = save_dir_path / f"{dt_string}_{job_id}_{file.filename}"
+
+        try:
+            with save_path.open("wb") as f:
+                shutil.copyfileobj(file.file, f)
+        finally:
+            file.file.close()
+
+        return save_path

+ 6 - 0
Target/File/utils_31.py

@@ -0,0 +1,6 @@
+def save_output(output, list_to_save):
+    # Only write when an output path was actually provided
+    if output:
+        with open(output, "w") as f:
+            for item in list_to_save:
+                f.write("%s\n" % item)
+        print(f"Output file: {output}")
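
With the guard corrected to `if output:`, a hypothetical call (the path and items are illustrative) writes one item per line:

    # Writes "alpha", "beta", "gamma" on separate lines to results.txt
    save_output("results.txt", ["alpha", "beta", "gamma"])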

+ 4 - 0
Target/File/utils_32.py

@@ -0,0 +1,4 @@
+def _saveTestWavFile(self, filename, wav_data):
+    # Write the raw WAV bytes to `filename`
+    with open(filename, "wb") as f:
+        f.write(wav_data)

+ 23 - 0
Target/File/utils_33.py

@@ -0,0 +1,23 @@
+def _save_large_file(self, os_path, content, format):
+    """Save content of a generic file."""
+    if format not in {'text', 'base64'}:
+        raise web.HTTPError(
+            400,
+            "Must specify format of file contents as 'text' or 'base64'",
+        )
+    try:
+        if format == 'text':
+            bcontent = content.encode('utf8')
+        else:
+            b64_bytes = content.encode('ascii')
+            bcontent = base64.b64decode(b64_bytes)
+    except Exception as e:
+        raise web.HTTPError(
+            400, u'Encoding error saving %s: %s' % (os_path, e)
+        )
+
+    with self.perm_to_403(os_path):
+        if os.path.islink(os_path):
+            os_path = os.path.join(os.path.dirname(os_path), os.readlink(os_path))
+        with io.open(os_path, 'ab') as f:
+            f.write(bcontent)

+ 21 - 0
Target/File/utils_34.py

@@ -0,0 +1,21 @@
+def _post_save_script(model, os_path, contents_manager, **kwargs):
+    """convert notebooks to Python script after save with nbconvert
+    replaces `jupyter notebook --script`
+    """
+    from nbconvert.exporters.script import ScriptExporter
+    warnings.warn("`_post_save_script` is deprecated and will be removed in Notebook 5.0", DeprecationWarning)
+
+    if model['type'] != 'notebook':
+        return
+
+    global _script_exporter
+    if _script_exporter is None:
+        _script_exporter = ScriptExporter(parent=contents_manager)
+    log = contents_manager.log
+
+    base, ext = os.path.splitext(os_path)
+    script, resources = _script_exporter.from_filename(os_path)
+    script_fname = base + resources.get('output_extension', '.txt')
+    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
+    with io.open(script_fname, 'w', encoding='utf-8') as f:
+        f.write(script)

+ 12 - 0
Target/File/utils_35.py

@@ -0,0 +1,12 @@
+def _save_data(filename, data):
+    """
+    Save formatted skeleton data to a pickle file
+    """
+    # Ensure the filename carries the ".p" extension
+    if not filename.endswith(".p"):
+        filename = filename + ".p"
+
+    with open(filename, 'wb') as fp:
+        pickle.dump(data, fp, protocol=pickle.HIGHEST_PROTOCOL)
+    print("Saved data to file: " + filename)
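
Only the save side is added here; a matching loader, assuming the same pickle protocol and ".p" naming convention, might be:

    def _load_data(filename):
        # Read back skeleton data written by _save_data
        if not filename.endswith(".p"):
            filename += ".p"
        with open(filename, "rb") as fp:
            return pickle.load(fp)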

+ 29 - 0
Target/File/utils_36.py

@@ -0,0 +1,29 @@
+def download_unknowns(url: str) -> None:
+    """Download a page of unknown type and save it under a name derived from its bytes."""
+    page_content: bytes = get_none_soup(url)
+    page_string: bytes = page_content[0:100]
+    # Parse a section of the page bytes and use it as the name. If the encoding
+    # is unknown, fall back to a number string (excluding the first few bytes
+    # that state the filetype).
+    try:
+        page_unicode = page_string.decode("ISO-8859-1").replace(R'%', '_')
+        page_parsed = [char for char in page_unicode if char.isalnum() or char == '_']
+        unknown_file_name = "".join(page_parsed)[10:30]
+    except UnicodeDecodeError:
+        try:
+            page_unicode = page_string.decode('utf-8').replace(R'%', '_')
+            page_parsed = [char for char in page_unicode if char.isalnum() or char == '_']
+            unknown_file_name = "".join(page_parsed)[10:30]
+        except UnicodeDecodeError:
+            unknown_file_name = "unk_"
+            for char in page_content[10:30]:
+                if char != b'\\':
+                    unknown_file_name += str(char)
+    print(unknown_file_name)
+    # Check the beginning of the page bytes for a known filetype
+    if b'%PDF' in page_string:
+        extension = '.pdf'
+    else:
+        extension = '.unk.txt'
+
+    with open(save_file_dir + '/' + unknown_file_name + extension, 'wb') as file:
+        file.write(page_content)

+ 24 - 0
Target/File/utils_37.py

@@ -0,0 +1,24 @@
+def download_images(start_url: str, filetypes: List[str]) -> None:
+    """Download every image on the page whose src matches one of the given filetypes."""
+    base_url = get_base_url(start_url)
+    # print(start_url)
+    soup = get_soup(start_url)  # ;print(soup)
+    if soup is not None:
+        for index, image in enumerate(soup.select('img')):  # print(image)
+            # image_raw = str(image)
+            src_raw = str(image.get('src'))  # print(image.attrs['src'])
+            if src_raw.startswith('http'):
+                image_url = src_raw
+            elif src_raw.startswith('/'):
+                image_url = base_url + src_raw
+            else:
+                image_url = src_raw
+            # print(image_url)
+            for image_type in filter(lambda x: x in src_raw, filetypes):  # print(image)
+                image_response = requests.get(image_url, stream=True)
+                if image_response.status_code == 200:
+                    image_name = re.sub(r'.*/', '', src_raw).replace(R'.', '_')
+                    # print(image_name, index)
+                    fp: BinaryIO = open(save_image_dir + '/' + image_name + str(index) + image_type, 'wb')
+                    fp.write(image_response.content)
+                    fp.close()

+ 20 - 0
Target/File/utils_38.py

@@ -0,0 +1,20 @@
+def _unicode_save(self, temp_file):
+    im = pygame.Surface((10, 10), 0, 32)
+    try:
+        with open(temp_file, "w") as f:
+            pass
+        os.remove(temp_file)
+    except IOError:
+        raise unittest.SkipTest("the path cannot be opened")
+
+    self.assertFalse(os.path.exists(temp_file))
+
+    try:
+        imageext.save_extended(im, temp_file)
+
+        self.assertGreater(os.path.getsize(temp_file), 10)
+    finally:
+        try:
+            os.remove(temp_file)
+        except EnvironmentError:
+            pass

+ 8 - 0
Target/File/utils_4.py

@@ -0,0 +1,8 @@
+def write_to_clf(clf_data, save_file):
+    """
+    Save a text-classification dataset to file.
+    clf_data: List[List[str]] [[text1, label1],[text2,label2]...]
+    file format: tsv, row: text + tab + label
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join(str(r) for r in row) for row in clf_data]))
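
An illustrative call (the rows and path are made up) showing the tsv row format this writer produces:

    # Each row becomes "text<TAB>label" in train_clf.tsv
    write_to_clf([["good movie", "pos"], ["bad plot", "neg"]], "train_clf.tsv")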

+ 7 - 0
Target/File/utils_5.py

@@ -0,0 +1,7 @@
+def write_to_seq2seq(seq_data, save_file):
+    """
+    seq_data: List[List[str]] [[src1, tgt1],[src2,tgt2]...]
+    file format: tsv, row: src + tab + tgt
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join([str(r) for r in row]) for row in seq_data]))

+ 9 - 0
Target/File/utils_6.py

@@ -0,0 +1,9 @@
+def write_to_ner(cls, ner_data, save_file):
+    """
+    :param cls: class reference (unused in this snippet)
+    :param ner_data: List[List[str]], one row per token/label pair
+    :param save_file: path of the tsv file to write (row: token + tab + label)
+    :return: None
+    """
+    with open(save_file, 'w', encoding='utf-8') as f:
+        f.writelines("\n".join(["\t".join(str(r) for r in row) for row in ner_data]))

+ 6 - 0
Target/File/utils_7.py

@@ -0,0 +1,6 @@
+def quick_save(self, model, save_name, optimizer=None):
+    save_path = os.path.join(self.save_dir, save_name + '_weights.pth')
+    if optimizer:
+        opt_weights = optimizer.get_weights()
+        np.save(os.path.join(self.save_dir, save_name + '_opt_weights'), opt_weights)
+    model.save_weights(save_path, save_format='h5')

+ 44 - 0
Target/File/utils_8.py

@@ -0,0 +1,44 @@
+def save(self, model, iter_nb, train_metrics_values, test_metrics_values, tasks_weights=[], optimizer=None):
+    self.logs_dict['train'][str(iter_nb)] = {}
+    self.logs_dict['val'][str(iter_nb)] = {}
+    for k in range(len(self.metrics)):
+        self.logs_dict['train'][str(iter_nb)][self.metrics[k]] = float(train_metrics_values[k])
+        self.logs_dict['val'][str(iter_nb)][self.metrics[k]] = float(test_metrics_values[k])
+
+    if len(tasks_weights) > 0:
+        for k in range(len(tasks_weights)):
+            self.logs_dict['val'][str(iter_nb)]['weight_' + str(k)] = tasks_weights[k]
+
+    with open(self.logs_file, 'w') as f:
+        json.dump(self.logs_dict, f)
+
+    ckpt = {
+        'model_state_dict': model.state_dict(),
+        'iter_nb': iter_nb,
+    }
+    if optimizer:
+        ckpt['optimizer_state_dict'] = optimizer.state_dict()
+
+    # Saves best miou score if reached
+    if 'MEAN_IOU' in self.metrics:
+        miou = float(test_metrics_values[self.metrics.index('MEAN_IOU')])
+        if miou > self.best_miou and iter_nb > 0:
+            print('Best miou. Saving it.')
+            torch.save(ckpt, self.best_miou_weights_file)
+            self.best_miou = miou
+            self.config_dict['best_miou'] = self.best_miou
+    # Saves best relative error if reached
+    if 'REL_ERR' in self.metrics:
+        rel_error = float(test_metrics_values[self.metrics.index('REL_ERR')])
+        if rel_error < self.best_rel_error and iter_nb > 0:
+            print('Best rel error. Saving it.')
+            torch.save(ckpt, self.best_rel_error_weights_file)
+            self.best_rel_error = rel_error
+            self.config_dict['best_rel_error'] = self.best_rel_error
+
+    # Saves last checkpoint
+    torch.save(ckpt, self.last_checkpoint_weights_file)
+    self.iter_nb = iter_nb
+    self.config_dict['iter'] = self.iter_nb
+    with open(self.config_file, 'w') as f:
+        json.dump(self.config_dict, f)

+ 46 - 0
Target/File/utils_9.py

@@ -0,0 +1,46 @@
+def extract_spec(dataset='train'):
+    f = open(data_path + dataset + '_list.txt', 'r')
+
+    i = 0
+    for file_name in f:
+        i = i + 1
+        if not (i % 10):
+            print(i)
+
+        # load audio file
+        file_name = file_name.rstrip('\n')
+        file_path = data_path + file_name
+        # print file_path
+        y0, sr = librosa.load(file_path, sr=22050)
+        # use only the first quarter of the signal (intended to be about 1 second)
+        half = len(y0) / 4
+        y = y0[:round(half)]
+        # mfcc
+        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=MFCC_DIM)
+        # delta mfcc and double delta
+        delta_mfcc = librosa.feature.delta(mfcc)
+        ddelta_mfcc = librosa.feature.delta(mfcc, order=2)
+
+        # STFT
+        D = np.abs(librosa.core.stft(y, hop_length=512, n_fft=1024, win_length=1024))
+        D_dB = librosa.amplitude_to_db(D, ref=np.max)
+
+        # mel spectrogram
+        mel_S = librosa.feature.melspectrogram(S=D, sr=sr, n_mels=128)
+        S_dB = librosa.power_to_db(mel_S, ref=np.max)  # log compression
+
+        # spectral centroid
+        spec_centroid = librosa.feature.spectral_centroid(S=D)
+
+        # concatenate all features
+        features = np.concatenate([mfcc, delta_mfcc, ddelta_mfcc, spec_centroid], axis=0)
+
+        # save mfcc as a file
+        file_name = file_name.replace('.wav', '.npy')
+        save_file = spec_path + file_name
+
+        if not os.path.exists(os.path.dirname(save_file)):
+            os.makedirs(os.path.dirname(save_file))
+        np.save(save_file, features)
+
+    f.close()

+ 0 - 26
Target/Hash/EncrypC_3.py

@@ -1,26 +0,0 @@
-def encrypt(self):
-
-        # create a cipher object
-
-        cipher_object = AES.new(
-            self.hashed_key_salt["key"], AES.MODE_CFB, self.hashed_key_salt["salt"]
-        )
-
-        self.abort()  # if the output file already exists, remove it first
-
-        input_file = open(self.user_file, "rb")
-        output_file = open(self.encrypt_output_file, "ab")
-        done_chunks = 0
-
-        for piece in self.read_in_chunks(input_file, self.chunk_size):
-            encrypted_content = cipher_object.encrypt(piece)
-            output_file.write(encrypted_content)
-            done_chunks += 1
-            yield done_chunks / self.total_chunks * 100
-
-        input_file.close()
-        output_file.close()
-
-        # clean up the cipher object
-
-        del cipher_object

+ 0 - 20
Target/Hash/EncryptionDecryption_2.py

@@ -1,20 +0,0 @@
-def encrypt(self, key, filename):
-        chunksize = 128 * 1024
-        outFile = os.path.join(os.path.dirname(filename), "(Secured)" + os.path.basename(filename))
-        filesize = str(os.path.getsize(filename)).zfill(16)
-        IV = Random.new().read(AES.block_size)
-        print(IV, len(IV))
-        encryptor = AES.new(key, AES.MODE_CBC, IV)
-
-        with open(filename, "rb") as infile:
-            with open(outFile, "wb") as outfile:
-                outfile.write(filesize.encode('utf-8'))
-                outfile.write(IV)
-                while True:
-                    chunk = infile.read(chunksize)
-                    if len(chunk) == 0:
-                        break
-                    elif len(chunk) % 16 != 0:
-                        chunk += b' ' * (16 - (len(chunk) % 16))
-                    outfile.write(encryptor.encrypt(chunk))
-        return outFile

+ 0 - 10
Target/Hash/base64_2.py

@@ -1,10 +0,0 @@
-def encryptFile():
-    myFile = input("enter file to encrypt: ")
-    file = open(myFile,"r")
-    contents = file.read()
-    contents = contents.encode()
-    file = open(myFile, "w")
-    encoded = base64.b64encode(contents)
-    # the .decode() converts the bytes to str, taking off the b'...'
-    file.write(str(encoded))
-    print ("File is now encrypted... and the contents is unreadable")

+ 0 - 14
Target/Hash/base64_3.py

@@ -1,14 +0,0 @@
-def decryptMessage():
-    pwd = "N3VIQUJmZ2pyNDVkZDRvMzNkZmd0NzBkZzlLOWRmcjJ0NWhCdmRm"
-    key = base64.b64decode(pwd) #the decoded version of this is the key.
-    value = input("Enter the decryption key: ").encode()
-    if value == key:
-        time.sleep(1)
-        message = input("Enter the message to decode: ")
-        decoded = base64.b64decode(message)
-        print (decoded)
-        menu()
-        
-    else:
-        print("Decryption key is wrong.")
-        menu()

+ 0 - 5
Target/Hash/base64_4.py

@@ -1,5 +0,0 @@
-def encrypt():
-    password = input("Enter a message: ").encode()
-    encoded = base64.b64encode(password)
-    print (encoded.decode()) 
-    menu()

+ 0 - 3
Target/Hash/base64_5.py

@@ -1,3 +0,0 @@
-def hashing(password):
-    hash1 = hashlib.md5(str.encode(password)).hexdigest()
-    print ("your hashed password is:", hash1,"\n")

+ 0 - 69
Target/Hash/biometry_hash_5.py

@@ -1,69 +0,0 @@
-def fp_search():
-        
-    """
-    PyFingerprint
-    Copyright (C) 2015 Bastian Raschke <bastian.raschke@posteo.de>
-    All rights reserved.
-
-    @author: Bastian Raschke <bastian.raschke@posteo.de>
-    """
-
-
-    ## Search for a finger
-    ##
-
-    ## Tries to initialize the sensor
-    try:
-        f = PyFingerprint('/dev/ttyUSB0', 57600, 0xFFFFFFFF, 0x00000000)
-
-        if ( f.verifyPassword() == False ):
-            raise ValueError('The given fingerprint sensor password is wrong!')
-
-    except Exception as e:
-        print('The fingerprint sensor could not be initialized!')
-        print('Exception message: ' + str(e))
-        exit(1)
-
-    ## Gets some sensor information
-    print('Currently stored templates: ' + str(f.getTemplateCount()))
-
-    ## Tries to search the finger and calculate hash
-    try:
-        print('Waiting for finger...')
-
-        ## Wait that finger is read
-        while ( f.readImage() == False ):
-            pass
-
-        ## Converts read image to characteristics and stores it in charbuffer 1
-        f.convertImage(0x01)
-
-        ## Searchs template
-        result = f.searchTemplate()
-
-        positionNumber = result[0]
-        accuracyScore = result[1]
-
-        if ( positionNumber == -1 ):
-            print('No match found!')
-            exit(0)
-        else:
-            print('Found template at position #' + str(positionNumber))
-            print('The accuracy score is: ' + str(accuracyScore))
-
-        ## OPTIONAL stuff
-        ##
-
-        ## Loads the found template to charbuffer 1
-        f.loadTemplate(positionNumber, 0x01)
-
-        ## Downloads the characteristics of template loaded in charbuffer 1
-        characterics = str(f.downloadCharacteristics(0x01))
-
-        ## Hashes characteristics of template
-        print('SHA-2 hash of template: ' + hashlib.sha256(characterics).hexdigest())
-
-    except Exception as e:
-        print('Operation failed!')
-        print('Exception message: ' + str(e))
-        exit(1)

+ 0 - 27
Target/Hash/biometry_hash_8.py

@@ -1,27 +0,0 @@
-def encrypt(key, filename):
-	chunksize = 64*1024
-	#print filename
-	#print "4th time: ", key
-	outputFile = "(encrypted)"+filename
-	filesize = str(os.path.getsize(filename)).zfill(16)
-	IV = ''
-
-	for i in range(16):
-		IV += chr(random.randint(0, 0xFF))
-
-	encryptor = AES.new(key, AES.MODE_CBC, IV)
-
-	with open(filename, 'rb') as infile:
-		with open(outputFile, 'wb') as outfile:
-			outfile.write(filesize)
-			outfile.write(IV)
-			
-			while True:
-				chunk = infile.read(chunksize)
-				
-				if len(chunk) == 0:
-					break
-				elif len(chunk) % 16 != 0:
-					chunk += ' ' * (16 - (len(chunk) % 16))
-
-				outfile.write(encryptor.encrypt(chunk))

+ 0 - 12
Target/Hash/crypto_4.py

@@ -1,12 +0,0 @@
-def __init__(self, data=None, truncate_to=None):
-        """ SHA-265d against length-extensions-attacks
-            with optional truncation of the hash
-
-        Args:
-            data: Initial string, optional
-            truncate_to: length to truncate the hash to, optional
-        """
-        self.h = sha256()
-        self.truncate_to = truncate_to
-        if data:
-            self.h.update(data)

+ 0 - 2
Target/Hash/crypto_6.py

@@ -1,2 +0,0 @@
-def digest(self):
-        return sha256(self.h.digest()).digest()[:self.truncate_to]

+ 0 - 2
Target/Hash/crypto_7.py

@@ -1,2 +0,0 @@
-def hexdigest(self):
-        return self.digest().encode('hex')

+ 0 - 7
Target/Hash/dirist_14.py

@@ -1,7 +0,0 @@
-def hasher(key):
-	try:
-		key = key.encode()
-		x = hashlib.sha224(key).hexdigest()
-		return(int(str(x),16))
-	except:
-		return -1

+ 0 - 11
Target/Hash/dirist_15.py

@@ -1,11 +0,0 @@
-def encrypt(key,text,dec=1):
-	if __name__ == '__main__':
-		encrypted = ""
-		key = hasher(key)
-		dupkey = key
-		for i in range(len(text)):
-			new = chr(ord(text[i])+ dec*(dupkey%10))
-			encrypted = encrypted + new
-			if dupkey==0: dupkey = key
-			dupkey = int(dupkey/10)
-		return encrypted

+ 0 - 4
Target/Hash/hash_1.py

@@ -1,4 +0,0 @@
-def md5(string):
-    m = hashlib.md5()
-    m.update(string)
-    return m.digest()

+ 0 - 4
Target/Hash/hash_2.py

@@ -1,4 +0,0 @@
-def sha256(string):
-    s = hashlib.sha256()
-    s.update(string)
-    return s.digest()

+ 0 - 21
Target/Hash/hash_3.py

@@ -1,21 +0,0 @@
-def hash(argc):
-    """hash: various hashing functions.
-    
-    Usage:
-        hash (md5|sha256) FILE
-        hash (md5|sha256) --string STRING
-    """
-    
-    if argc.args['FILE']:
-        if argc.args['md5']:
-            return md5(open(argc.args['FILE']).read())
-
-        elif argc.args['sha256']:
-            return sha256(open(argc.args['FILE']).read())
-
-    elif argc.args['--string']:
-        if argc.args['md5']:
-            return md5(argc.args['STRING'])
-
-        elif argc.args['sha256']:
-            return sha256(argc.args['STRING'])

+ 0 - 27
Target/Hash/md5_encryption_1.py

@@ -1,27 +0,0 @@
-def md5_encrypt(data, key):
-	if len(data) % 16 != 0:
-		data += (16 - (len(data) % 16)) * '\x00'
-
-	hash_block = []
-
-	output = ''
-	last_hash = ''
-	for c in data:
-		md5_ctx = hashlib.md5()
-		md5_ctx.update(c)
-		md5_ctx.update(key)
-		md5_ctx.update(last_hash)
-		hash_block.append(md5_ctx.digest())
-		last_hash = hash_block[-1]
-		if len(hash_block) == 16:
-			cur_block = ''
-			for b in hash_block:
-				cur_block += b
-
-			md5_ctx = hashlib.md5()
-			md5_ctx.update(cur_block)
-			last_hash = md5_ctx.digest()
-			output += cur_block + last_hash
-			hash_block = []
-
-	return output

+ 0 - 14
Target/Hash/simple-hash_2.py

@@ -1,14 +0,0 @@
-def hash_pwd (string, salt=None):
-  msg = []
-
-  if salt == None:
-    salt = gen_salt(20)
-
-  for i, c in enumerate(string):
-    char_s = ord(salt[len(string) - i])
-    char_string = ord(c)
-    msg.append(chr((char_s + char_string) % 127))
-
-  res = ''.join(msg)
-
-  return (res + salt, salt)

+ 0 - 23
Target/Pseudonym/anonymize_3.py

@@ -1,23 +0,0 @@
-def find_nhs_numbers(fn):
-    try:
-        f = open(fn, 'rb')
-    except IOError:
-        return None
-    locations = []
-    num = 0
-    while True:
-        c = f.read(1)
-        if c == '':
-            break
-        ascii_ = ord(c)
-
-        if ascii_ in (48, 49, 50, 51, 52, 53, 54, 55, 56, 57):
-            num += 1
-        else:
-            if num == 10:
-                startLocation = f.tell() - 11
-                f.seek(startLocation)
-                if validate_nhs_number(f.read(10)):
-                    locations.append(startLocation)
-            num = 0
-    return locations

+ 0 - 13
Target/Pseudonym/anonymize_5.py

@@ -1,13 +0,0 @@
-def replace_nhs_numbers(fn, locations):
-
-    outFn = os.path.join(os.path.dirname(fn), "ANON_" + os.path.basename(fn))
-    with open(fn, 'rb') as f, open(outFn, 'wb') as out:
-
-        for location in locations:
-            buf = f.read(location - f.tell())
-            out.write(buf)
-
-            pseudo = str(get_pseudonym((int(f.read(10)))))
-            out.write(pseudo)
-
-        out.write(f.read())

+ 0 - 42
Target/Pseudonym/anonymize_6.py

@@ -1,42 +0,0 @@
-def get_pseudonym(nhs_number):
-    global GENERATE_NEW
-
-    pseudo = LOOKUP.get(nhs_number)
-    if pseudo is not None:
-        return pseudo
-
-    if GENERATE_NEW is not True:
-        print("I have encountered a new NHS number (%d) with no pseudonym.\n"
-              "Should I generate new ones for any new NHS numbers I find "
-              "from now on?" % nhs_number)
-        response = raw_input("type y or n:")
-        if response == 'y':
-            GENERATE_NEW = True
-        else:
-            print("In that case, I will exit now.")
-            exit()
-
-    while True:
-        digits = []
-        s = ''
-        tot = 0
-        for i in range(9):
-            if i == 0:
-                digit = random.randint(1, 9)
-            else:
-                digit = random.randint(0, 9)
-            digits.append(digit)
-            s += str(digit)
-            tot += digit * (10 - i)  # (10 - i) is the weighting factor
-
-        checksum = 11 - (tot % 11)
-
-        if checksum == 11:
-            checksum = 0
-        if checksum != 10:  # 10 is an invalid nhs number
-            s += str(checksum)
-
-            pseudo = int(s)
-            LOOKUP[nhs_number] = pseudo
-
-            return pseudo

+ 0 - 20
Target/Pseudonym/dataFrameAnonymizer_3.py

@@ -1,20 +0,0 @@
-def anonymize(self, df, k, l=0):
-
-        # Check inputs
-        if df is None or len(df) == 0:
-            raise Exception("Dataframe is empty")
-        if self.sensitive_attribute_columns is None or len(self.sensitive_attribute_columns) == 0:
-            raise Exception("Provide at least one sensitive attribute column")
-
-        if not self.feature_columns:
-            self.init_feature_colums(df)
-
-        if self.avg_columns:
-            for c in self.avg_columns:
-                if not is_numeric_dtype(df[c]):
-                    raise Exception("Column " + c + " is not numeric and average cannot be calculated.")
-
-        mondrian = MondrianAnonymizer(df, self.feature_columns, self.sensitive_attribute_columns)
-        partitions = mondrian.partition(k, l)
-        dfa = self.build_anonymized_dataframe(df, partitions)
-        return dfa

+ 0 - 5
Target/Pseudonym/pseudodepseudonimizer_2.py

@@ -1,5 +0,0 @@
-def dots2numberedDots(all_text, replace_string="..."):
-    replace_string = re.escape(replace_string)
-    dots_regex = re.compile(r"(\s)[A-Z]?\[?({})\]?(\s)".format(replace_string))
-    all_text = dots_regex.sub(dots_repl, all_text)
-    return all_text

+ 1 - 1
utils/splitfile.py

@@ -40,4 +40,4 @@ def split_file(file_dir, output_dir, endpoint=".py"):
 
 
 
 
 if __name__ == '__main__':
-    split_file("../Azure", "../Target/Azure")
+    split_file("../File", "../Target/File")