Dataset Viewer
Auto-converted to Parquet
Columns:
repo_name: stringlengths 13-51
branch_name: stringclasses, 4 values
import_graph: stringlengths 40-15.7k
directory_tree: stringlengths 125-5.52k
file_1_name: stringlengths 5-79; file_1_content: stringlengths 27-58.8k
file_2_name: stringlengths 6-89; file_2_content: stringlengths 16-33.4k
file_3_name: stringlengths 6-79; file_3_content: stringlengths 60-29.4k
file_4_name: stringlengths 6-85; file_4_content: stringlengths 48-22.1k
file_5_name: stringlengths 6-86; file_5_content: stringlengths 38-27.8k
file_6_name: stringlengths 7-103; file_6_content: stringlengths 39-37.3k
file_7_name: stringlengths 7-99; file_7_content: stringlengths 66-41.3k
file_8_name: stringlengths 8-98; file_8_content: stringlengths 117-51.9k
file_9_name: stringlengths 8-96; file_9_content: stringlengths 75-27.4k
file_10_name: stringlengths 9-95; file_10_content: stringlengths 62-18.5k
file_11_name: stringlengths 8-93; file_11_content: stringlengths 25-14.1k
file_12_name: stringlengths 7-94; file_12_content: stringlengths 118-13.2k
file_13_name: stringlengths 8-98; file_13_content: stringlengths 35-29.6k
file_14_name: stringlengths 9-88; file_14_content: stringlengths 37-16.6k
file_15_name: stringlengths 10-100; file_15_content: stringlengths 90-31.5k
file_16_name: stringlengths 9-99; file_16_content: stringlengths 384-20.1k
file_17_name: stringlengths 14-90; file_17_content: stringlengths 202-14.2k
file_18_name: stringlengths 9-97; file_18_content: stringlengths 107-34.6k
file_19_name: stringlengths 9-97; file_19_content: stringlengths 146-26.4k
file_20_name: stringlengths 10-100; file_20_content: stringlengths 474-21.4k
file_21_name / file_21_content: stringclasses, 19 values
file_22_name / file_22_content: stringclasses, 16 values
file_23_name / file_23_content: stringclasses, 14 values
file_24_name / file_24_content: stringclasses, 10 values
file_25_name / file_25_content: stringclasses, 9 values
file_26_name / file_26_content: stringclasses, 9 values
file_27 to file_29 (name and content): stringclasses, 7 values
file_30 to file_33 (name and content): stringclasses, 6 values
file_34 to file_35 (name and content): stringclasses, 5 values
file_36 to file_49 (name and content): stringclasses, 4 values
file_50 to file_57 (name and content): stringclasses, 3 values
file_58 to file_97 (name and content): stringclasses, 1 value
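Each row therefore pairs repository metadata (repo_name, branch_name, import_graph, directory_tree) with up to 97 file_N_name / file_N_content column pairs; unused slots are null. A minimal sketch of consuming this flat layout with the `datasets` library is shown below; the dataset ID and split name are placeholders, not the real identifiers.

```python
# Sketch: fold the flat file_N_name / file_N_content columns of one row
# back into a {path: source} dict. "user/python-import-graphs" and the
# "train" split are placeholder assumptions.
from datasets import load_dataset

ds = load_dataset("user/python-import-graphs", split="train", streaming=True)

for row in ds:
    files = {}
    for i in range(1, 98):  # columns file_1_* .. file_97_*
        name = row.get(f"file_{i}_name")
        if name is None:  # unused slots are null
            break
        files[name] = row[f"file_{i}_content"]
    print(row["repo_name"], len(files), "files")
    break  # only inspect the first row here
```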
Example row:
repo_name: shuishen112/pairwise-rnn
branch_name: refs/heads/master
import_graph: {"/run.py": ["/data_helper.py", "/config.py"], "/test.py": ["/data_helper.py", "/config.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"]}
directory_tree:
└──
├── config.py
├── data_helper.py
├── main.py
├── models
│   ├── QA_CNN_pairwise.py
│   ├── __init__.py
│   └── my
│       └── nn.py
├── run.py
└── test.py
file_1_name: /config.py
file_1_content:
class Singleton(object): __instance=None def __init__(self): pass def getInstance(self): if Singleton.__instance is None: # Singleton.__instance=object.__new__(cls,*args,**kwd) Singleton.__instance=self.get_test_flag() print("build FLAGS over") return Singleton.__instance def get_test_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_rnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") # flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('data','trec','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_cnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_qcnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "qcnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','mean','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_8008_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS if __name__=="__main__": args=Singleton().get_test_flag() for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value)))
file_2_name: /data_helper.py
file_2_content:
#-*- coding:utf-8 -*- import os import numpy as np import tensorflow as tf import string from collections import Counter import pandas as pd from tqdm import tqdm import random from functools import wraps import time import pickle def log_time_delta(func): @wraps(func) def _deco(*args, **kwargs): start = time.time() ret = func(*args, **kwargs) end = time.time() delta = end - start print( "%s runed %.2f seconds"% (func.__name__,delta)) return ret return _deco import tqdm from nltk.corpus import stopwords OVERLAP = 237 class Alphabet(dict): def __init__(self, start_feature_id = 1): self.fid = start_feature_id def add(self, item): idx = self.get(item, None) if idx is None: idx = self.fid self[item] = idx # self[idx] = item self.fid += 1 return idx def dump(self, fname): with open(fname, "w") as out: for k in sorted(self.keys()): out.write("{}\t{}\n".format(k, self[k])) def cut(sentence): tokens = sentence.lower().split() # tokens = [w for w in tokens if w not in stopwords.words('english')] return tokens @log_time_delta def load(dataset, filter = False): data_dir = "data/" + dataset datas = [] for data_name in ['train.txt','test.txt','dev.txt']: data_file = os.path.join(data_dir,data_name) data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0') # data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0') if filter == True: datas.append(removeUnanswerdQuestion(data)) else: datas.append(data) # sub_file = os.path.join(data_dir,'submit.txt') # submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3) # datas.append(submit) return tuple(datas) @log_time_delta def removeUnanswerdQuestion(df): counter= df.groupby("question").apply(lambda group: sum(group["flag"])) questions_have_correct=counter[counter>0].index counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0)) questions_have_uncorrect=counter[counter>0].index counter=df.groupby("question").apply(lambda group: len(group["flag"])) questions_multi=counter[counter>1].index return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index() @log_time_delta def get_alphabet(corpuses=None,dataset=""): pkl_name="temp/"+dataset+".alphabet.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) alphabet = Alphabet(start_feature_id = 0) alphabet.add('[UNK]') alphabet.add('END') count = 0 for corpus in corpuses: for texts in [corpus["question"].unique(),corpus["answer"]]: for sentence in texts: tokens = cut(sentence) for token in set(tokens): alphabet.add(token) print("alphabet size %d" % len(alphabet.keys()) ) if not os.path.exists("temp"): os.mkdir("temp") pickle.dump( alphabet,open(pkl_name,"wb")) return alphabet @log_time_delta def getSubVectorsFromDict(vectors,vocab,dim = 300): embedding = np.zeros((len(vocab),dim)) count = 1 for word in vocab: if word in vectors: count += 1 embedding[vocab[word]]= vectors[word] else: embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist() print( 'word in embedding',count) return embedding def encode_to_split(sentence,alphabet): indices = [] tokens = cut(sentence) seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens] return seq @log_time_delta def load_text_vec(alphabet,filename="",embedding_size = 100): vectors = {} with open(filename,encoding='utf-8') as f: i = 0 for line in f: i += 1 if i % 100000 
== 0: print( 'epch %d' % i) items = line.strip().split(' ') if len(items) == 2: vocab_size, embedding_size= items[0],items[1] print( ( vocab_size, embedding_size)) else: word = items[0] if word in alphabet: vectors[word] = items[1:] print( 'embedding_size',embedding_size) print( 'done') print( 'words found in wor2vec embedding ',len(vectors.keys())) return vectors @log_time_delta def get_embedding(alphabet,dim = 300,language ="en",dataset=""): pkl_name="temp/"+dataset+".subembedding.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) if language=="en": fname = 'embedding/glove.6B/glove.6B.300d.txt' else: fname= "embedding/embedding.200.header_txt" embeddings = load_text_vec(alphabet,fname,embedding_size = dim) sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim) pickle.dump( sub_embeddings,open(pkl_name,"wb")) return sub_embeddings @log_time_delta def get_mini_batch_test(df,alphabet,batch_size): q = [] a = [] pos_overlap = [] for index,row in df.iterrows(): question = encode_to_split(row["question"],alphabet) answer = encode_to_split(row["answer"],alphabet) overlap_pos = overlap_index(row['question'],row['answer']) q.append(question) a.append(answer) pos_overlap.append(overlap_pos) m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in mini_batches: mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) yield(mb_q,mb_a) # calculate the overlap_index def overlap_index(question,answer,stopwords = []): ans_token = cut(answer) qset = set(cut(question)) aset = set(ans_token) a_len = len(ans_token) # q_index = np.arange(1,q_len) a_index = np.arange(1,a_len + 1) overlap = qset.intersection(aset) # for i,q in enumerate(cut(question)[:q_len]): # value = 1 # if q in overlap: # value = 2 # q_index[i] = value for i,a in enumerate(ans_token): if a in overlap: a_index[i] = OVERLAP return a_index def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False): q,a,neg_a=[],[],[] answers=df["answer"][:250] ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict() for question in tqdm(df['question'].unique()): index= ground_truth[question] canindates = [i for i in range(250)] canindates.remove(index) a_neg_index = random.choice(canindates) seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(answers[index],alphabet) seq_neg_a = encode_to_split(answers[a_neg_index],alphabet) q.append(seq_q) a.append( seq_a) neg_a.append(seq_neg_a ) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False): if sort_by_len: sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True) q = [ q[i] for i in sorted_index] a = [a[i] for i in sorted_index] neg_a = [ neg_a[i] for i in sorted_index] pos_overlap = [pos_overlap[i] for i in sorted_index] neg_overlap = [neg_overlap[i] for i in sorted_index] #get batch m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) if shuffle: np.random.shuffle(idx_list) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in tqdm(mini_batches): mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_neg_a = [ neg_a[t] for t in mini_batch] 
mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_neg_overlap = [neg_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) mb_neg_a,mb_neg_overlaps = prepare_data(mb_neg_a,mb_neg_overlap) # mb_a,mb_a_mask = prepare_data(mb_a,mb_pos_overlap) # mb_neg_a , mb_a_neg_mask = prepare_data(mb_neg_a) yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask) def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None): q = [] a = [] neg_a = [] for question in df['question'].unique(): # group = df[df["question"]==question] # pos_answers = group[df["flag"] == 1]["answer"] # neg_answers = group[df["flag"] == 0]["answer"].reset_index() group = df[df["question"]==question] pos_answers = group[group["flag"] == 1]["answer"] neg_answers = group[group["flag"] == 0]["answer"]#.reset_index() for pos in pos_answers: if model is not None and sess is not None: pos_sent= encode_to_split(pos,alphabet) q_sent,q_mask= prepare_data([pos_sent]) neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers] a_sent,a_mask= prepare_data(neg_sents) scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask)) neg_index = scores.argmax() else: if len(neg_answers.index) > 0: neg_index = np.random.choice(neg_answers.index) neg = neg_answers.reset_index().loc[neg_index,]["answer"] seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(pos,alphabet) seq_neg_a = encode_to_split(neg,alphabet) q.append(seq_q) a.append(seq_a) neg_a.append(seq_neg_a) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def prepare_data(seqs,overlap = None): lengths = [len(seq) for seq in seqs] n_samples = len(seqs) max_len = np.max(lengths) x = np.zeros((n_samples,max_len)).astype('int32') if overlap is not None: overlap_position = np.zeros((n_samples,max_len)).astype('float') for idx ,seq in enumerate(seqs): x[idx,:lengths[idx]] = seq overlap_position[idx,:lengths[idx]] = overlap[idx] return x,overlap_position else: x_mask = np.zeros((n_samples, max_len)).astype('float') for idx, seq in enumerate(seqs): x[idx, :lengths[idx]] = seq x_mask[idx, :lengths[idx]] = 1.0 # print( x, x_mask) return x, x_mask # def prepare_data(seqs): # lengths = [len(seq) for seq in seqs] # n_samples = len(seqs) # max_len = np.max(lengths) # x = np.zeros((n_samples, max_len)).astype('int32') # x_mask = np.zeros((n_samples, max_len)).astype('float') # for idx, seq in enumerate(seqs): # x[idx, :lengths[idx]] = seq # x_mask[idx, :lengths[idx]] = 1.0 # # print( x, x_mask) # return x, x_mask def getLogger(): import sys import logging import os import time now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) log_filename = "log/" +time.strftime("%Y%m%d", timeArray) program = os.path.basename(sys.argv[0]) logger = logging.getLogger(program) if not os.path.exists(log_filename): os.mkdir(log_filename) logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w') logging.root.setLevel(level=logging.INFO) logger.info("running %s" % ' '.join(sys.argv)) return logger
file_3_name: /main.py
file_3_content:
import data_helper import time import datetime import os import tensorflow as tf import numpy as np import evaluation now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) timeDay = time.strftime("%Y%m%d", timeArray) print (timeStamp) def main(args): args._parse_flags() print("\nParameters:") for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value))) log_dir = 'log/'+ timeDay if not os.path.exists(log_dir): os.makedirs(log_dir) data_file = log_dir + '/test_' + args.data + timeStamp precision = data_file + 'precise' print('load data ...........') train,test,dev = data_helper.load(args.data,filter = args.clean) q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split())) a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split())) alphabet = data_helper.get_alphabet([train,test,dev]) print('the number of words',len(alphabet)) print('get embedding') if args.data=="quora": embedding = data_helper.get_embedding(alphabet,language="cn") else: embedding = data_helper.get_embedding(alphabet) with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)): # with tf.device("/cpu:0"): session_conf = tf.ConfigProto() session_conf.allow_soft_placement = args.allow_soft_placement session_conf.log_device_placement = args.log_device_placement session_conf.gpu_options.allow_growth = True sess = tf.Session(config=session_conf) model = QA_CNN_extend(max_input_left = q_max_sent_length, max_input_right = a_max_sent_length, batch_size = args.batch_size, vocab_size = len(alphabet), embedding_size = args.embedding_dim, filter_sizes = list(map(int, args.filter_sizes.split(","))), num_filters = args.num_filters, hidden_size = args.hidden_size, dropout_keep_prob = args.dropout_keep_prob, embeddings = embedding, l2_reg_lambda = args.l2_reg_lambda, trainable = args.trainable, pooling = args.pooling, conv = args.conv) model.build_graph() sess.run(tf.global_variables_initializer()) def train_step(model,sess,batch): for data in batch: feed_dict = { model.question:data[0], model.answer:data[1], model.answer_negative:data[2], model.q_mask:data[3], model.a_mask:data[4], model.a_neg_mask:data[5] } _, summary, step, loss, accuracy,score12, score13, see = sess.run( [model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see], feed_dict) time_str = datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) def predict(model,sess,batch,test): scores = [] for data in batch: feed_dict = { model.question:data[0], model.answer:data[1], model.q_mask:data[2], model.a_mask:data[3] } score = sess.run( model.score12, feed_dict) scores.extend(score) return np.array(scores[:len(test)]) for i in range(args.num_epoches): datas = data_helper.get_mini_batch(train,alphabet,args.batch_size) train_step(model,sess,datas) test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) predicted_test = predict(model,sess,test_datas,test) print(len(predicted_test)) print(len(test)) map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) print('map_mrr test',map_mrr_test)
file_4_name: /models/QA_CNN_pairwise.py
file_4_content:
#coding:utf-8 import tensorflow as tf import numpy as np from tensorflow.contrib import rnn import models.blocks as blocks # model_type :apn or qacnn class QA_CNN_extend(object): # def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size, # dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'): # # """ # QA_RNN model for question answering # # Args: # self.dropout_keep_prob: dropout rate # self.num_filters : number of filters # self.para : parameter list # self.extend_feature_dim : my extend feature dimension # self.max_input_left : the length of question # self.max_input_right : the length of answer # self.pooling : pooling strategy :max pooling or attentive pooling # # """ # self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob') # self.num_filters = num_filters # self.embeddings = embeddings # self.embedding_size = embedding_size # self.batch_size = batch_size # self.filter_sizes = filter_sizes # self.l2_reg_lambda = l2_reg_lambda # self.para = [] # # self.max_input_left = max_input_left # self.max_input_right = max_input_right # self.trainable = trainable # self.vocab_size = vocab_size # self.pooling = pooling # self.total_num_filter = len(self.filter_sizes) * self.num_filters # # self.conv = conv # self.pooling = 'traditional' # self.learning_rate = learning_rate # # self.hidden_size = hidden_size # # self.attention_size = 100 def __init__(self,opt): for key,value in opt.items(): self.__setattr__(key,value) self.attention_size = 100 self.pooling = 'mean' self.total_num_filter = len(self.filter_sizes) * self.num_filters self.para = [] self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob') def create_placeholder(self): print(('Create placeholders')) # he length of the sentence is varied according to the batch,so the None,None self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question') self.max_input_left = tf.shape(self.question)[1] self.batch_size = tf.shape(self.question)[0] self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer') self.max_input_right = tf.shape(self.answer)[1] self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right') # self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask') # self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask') # self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask') def add_embeddings(self): print( 'add embeddings') if self.embeddings is not None: print( "load embedding") W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable) else: print( "random embedding") W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable) self.embedding_W = W # self.overlap_W = tf.Variable(a,name="W",trainable = True) self.para.append(self.embedding_W) self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question) self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer) self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative) #real length self.q_len,self.q_mask = blocks.length(self.question) self.a_len,self.a_mask = blocks.length(self.answer) self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative) def convolution(self): print( 'convolution:wide_convolution') self.kernels = [] for i,filter_size in 
enumerate(self.filter_sizes): with tf.name_scope('conv-max-pool-%s' % filter_size): filter_shape = [filter_size,self.embedding_size,1,self.num_filters] W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W") b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b") self.kernels.append((W,b)) self.para.append(W) self.para.append(b) embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding] self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings] #convolution def pooling_graph(self): if self.pooling == 'mean': self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask) self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask) elif self.pooling == 'attentive': self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'position': self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'traditional': print( self.pooling) print(self.q_cnn) self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) def para_initial(self): # print(("---------")) # self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp')) self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U')) self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm')) self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm')) self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms')) self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi')) def mean_pooling(self,conv,mask): conv = tf.squeeze(conv,2) print( tf.expand_dims(tf.cast(mask,tf.float32),-1)) # conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1)) # self.see = conv_mask # print( conv_mask) return tf.reduce_mean(conv,axis = 1); def attentive_pooling(self,input_left,input_right,q_mask,a_mask): Q = tf.squeeze(input_left,axis = 2) A = tf.squeeze(input_right,axis = 2) print( Q) print( A) # Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A') # G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\ # A,transpose_b = True),name = 'G') first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U) second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters]) result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1])) print( second_step) print( tf.transpose(A,perm = [0,2,1])) # print( 'result',result) G = tf.tanh(result) # G = result # column-wise pooling ,row-wise pooling 
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling') col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling') self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q') self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1)) self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a') self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1)) self.see = G R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q') R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a') return R_q,R_a def traditional_attention(self,input_left,input_right,q_mask,a_mask): input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2)) Q = tf.reduce_mean(input_left_mask,1) a_shape = tf.shape(input_right) A = tf.reshape(input_right,[-1,self.total_num_filter]) m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1)) f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1])) self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2)) self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True)) self.see = self.f_attention_norm a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1) return Q,a_attention def position_attention(self,input_left,input_right,q_mask,a_mask): input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) # Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A') Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) QU = tf.matmul(Q,self.U) QUA = tf.multiply(tf.expand_dims(QU,1),input_right) self.attention_a = tf.cast(tf.argmax(QUA,2) ,tf.float32) # q_shape = tf.shape(input_left) # Q_1 = tf.reshape(input_left,[-1,self.total_num_filter]) # QU = tf.matmul(Q_1,self.U) # QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter]) # A_1 = tf.transpose(input_right,[0,2,1]) # QUA = tf.matmul(QU_1,A_1) # QUA = tf.nn.l2_normalize(QUA,1) # G = tf.tanh(QUA) # Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) # # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)) # row_pooling = tf.reduce_max(G,1,name="row_pooling") # col_pooling = tf.reduce_max(G,2,name="col_pooling") # self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a") self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32)) self.see = self.attention_a self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True)) self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter]) return Q ,self.r_a def create_loss(self): with tf.name_scope('score'): self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn) self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn) l2_loss = tf.constant(0.0) for p in self.para: l2_loss += tf.nn.l2_loss(p) with 
tf.name_scope("loss"): self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13))) self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss tf.summary.scalar('loss', self.loss) # Accuracy with tf.name_scope("accuracy"): self.correct = tf.equal(0.0, self.losses) self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy") tf.summary.scalar('accuracy', self.accuracy) def create_op(self): self.global_step = tf.Variable(0, name = "global_step", trainable = False) self.optimizer = tf.train.AdamOptimizer(self.learning_rate) self.grads_and_vars = self.optimizer.compute_gradients(self.loss) self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step) def max_pooling(self,conv,input_length): pooled = tf.nn.max_pool( conv, ksize = [1, input_length, 1, 1], strides = [1, 1, 1, 1], padding = 'VALID', name="pool") return pooled def getCosine(self,q,a): pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder) pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder) pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1)) pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1)) pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1) score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores") return score def wide_convolution(self,embedding): cnn_outputs = [] for i,filter_size in enumerate(self.filter_sizes): conv = tf.nn.conv2d( embedding, self.kernels[i][0], strides=[1, 1, self.embedding_size, 1], padding='SAME', name="conv-1" ) h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1") cnn_outputs.append(h) cnn_reshaped = tf.concat(cnn_outputs,3) return cnn_reshaped def variable_summaries(self,var): with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def build_graph(self): self.create_placeholder() self.add_embeddings() self.para_initial() self.convolution() self.pooling_graph() self.create_loss() self.create_op() self.merged = tf.summary.merge_all() def train(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], self.answer_negative:data[2], # self.q_mask:data[3], # self.a_mask:data[4], # self.a_neg_mask:data[5], self.dropout_keep_prob_holder:self.dropout_keep_prob } _, summary, step, loss, accuracy,score12, score13, see = sess.run( [self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see], feed_dict) return _, summary, step, loss, accuracy,score12, score13, see def predict(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], # self.q_mask:data[2], # self.a_mask:data[3], self.dropout_keep_prob_holder:1.0 } score = sess.run( self.score12, feed_dict) return score if __name__ == '__main__': cnn = QA_CNN_extend( max_input_left = 33, max_input_right = 40, batch_size = 3, vocab_size = 5000, embedding_size = 100, filter_sizes = [3,4,5], num_filters = 64, hidden_size = 100, dropout_keep_prob = 1.0, embeddings = None, l2_reg_lambda = 0.0, trainable = True, pooling = 'max', conv = 'wide') cnn.build_graph() input_x_1 = np.reshape(np.arange(3 * 33),[3,33]) input_x_2 = np.reshape(np.arange(3 * 40),[3,40]) input_x_3 
= np.reshape(np.arange(3 * 40),[3,40]) q_mask = np.ones((3,33)) a_mask = np.ones((3,40)) a_neg_mask = np.ones((3,40)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) feed_dict = { cnn.question:input_x_1, cnn.answer:input_x_2, # cnn.answer_negative:input_x_3, cnn.q_mask:q_mask, cnn.a_mask:a_mask, cnn.dropout_keep_prob_holder:cnn.dropout_keep # cnn.a_neg_mask:a_neg_mask # cnn.q_pos_overlap:q_pos_embedding, # cnn.q_neg_overlap:q_neg_embedding, # cnn.a_pos_overlap:a_pos_embedding, # cnn.a_neg_overlap:a_neg_embedding, # cnn.q_position:q_position, # cnn.a_pos_position:a_pos_position, # cnn.a_neg_position:a_neg_position } question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict) print( question.shape,answer.shape) print( score)
file_5_name: /models/__init__.py
file_5_content:
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN

def setup(opt):
    if opt["model_name"] == "cnn":
        model = CNN(opt)
    elif opt["model_name"] == "rnn":
        model = RNN(opt)
    elif opt['model_name'] == 'qcnn':
        model = QCNN(opt)
    else:
        print("no model")
        exit(0)
    return model
file_6_name: /models/my/nn.py
file_6_content:
from my.general import flatten, reconstruct, add_wd, exp_mask import numpy as np import tensorflow as tf _BIAS_VARIABLE_NAME = "bias" _WEIGHTS_VARIABLE_NAME = "kernel" def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0, is_train=None):#, name_w='', name_b='' # if args is None or (nest.is_sequence(args) and not args): # raise ValueError("`args` must be specified") # if not nest.is_sequence(args): # args = [args] flat_args = [flatten(arg, 1) for arg in args]#[210,20] # if input_keep_prob < 1.0: # assert is_train is not None flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args] total_arg_size = 0#[60] shapes = [a.get_shape() for a in flat_args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if shape[1].value is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += shape[1].value # print(total_arg_size) # exit() dtype = [a.dtype for a in flat_args][0] # scope = tf.get_variable_scope() with tf.variable_scope(scope) as outer_scope: weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype) if len(flat_args) == 1: res = tf.matmul(flat_args[0], weights) else: res = tf.matmul(tf.concat(flat_args, 1), weights) if not bias: flat_out = res else: with tf.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) biases = tf.get_variable( _BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype)) flat_out = tf.nn.bias_add(res, biases) out = reconstruct(flat_out, args[0], 1) if squeeze: out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1]) if wd: add_wd(wd) return out def softmax(logits, mask=None, scope=None): with tf.name_scope(scope or "Softmax"): if mask is not None: logits = exp_mask(logits, mask) flat_logits = flatten(logits, 1) flat_out = tf.nn.softmax(flat_logits) out = reconstruct(flat_out, logits, 1) return out def softsel(target, logits, mask=None, scope=None): """ :param target: [ ..., J, d] dtype=float :param logits: [ ..., J], dtype=float :param mask: [ ..., J], dtype=bool :param scope: :return: [..., d], dtype=float """ with tf.name_scope(scope or "Softsel"): a = softmax(logits, mask = mask) target_rank = len(target.get_shape().as_list()) out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2) return out def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0): with tf.variable_scope(scope or "highway_layer"): d = arg.get_shape()[-1] trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob) trans = tf.nn.relu(trans) gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob) gate = tf.nn.sigmoid(gate) out = gate * trans + (1 - gate) * arg return out def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0): with tf.variable_scope(scope or "highway_network"): prev = arg cur = None for layer_idx in range(num_layers): cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd, input_keep_prob=input_keep_prob) prev = cur return cur def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None): with tf.variable_scope(scope or "conv1d"): num_channels = in_.get_shape()[-1] filter_ = tf.get_variable("filter", shape=[1, height, num_channels, 
filter_size], dtype='float') bias = tf.get_variable("bias", shape=[filter_size], dtype='float') strides = [1, 1, 1, 1] in_ = tf.nn.dropout(in_, keep_prob) xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d] out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d] return out def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None): with tf.variable_scope(scope or "multi_conv1d"): assert len(filter_sizes) == len(heights) outs = [] for filter_size, height in zip(filter_sizes, heights): if filter_size == 0: continue out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height)) outs.append(out) concat_out = tf.concat(outs, axis=2) return concat_out if __name__ == '__main__': a = tf.Variable(np.random.random(size=(2,2,4))) b = tf.Variable(np.random.random(size=(2,3,4))) c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1]) test = flatten(c,1) out = reconstruct(test, c, 1) d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1]) e = linear([c,d,c*d],1,bias = False,scope = "test",) # f = softsel(d, e) with tf.Session() as sess: tf.global_variables_initializer().run() print(sess.run(test)) print(sess.run(tf.shape(out))) exit() print(sess.run(tf.shape(a))) print(sess.run(a)) print(sess.run(tf.shape(b))) print(sess.run(b)) print(sess.run(tf.shape(c))) print(sess.run(c)) print(sess.run(tf.shape(d))) print(sess.run(d)) print(sess.run(tf.shape(e))) print(sess.run(e))
file_7_name: /run.py
file_7_content:
from tensorflow import flags import tensorflow as tf from config import Singleton import data_helper import datetime,os import models import numpy as np import evaluation import sys import logging import time now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) log_filename = "log/" +time.strftime("%Y%m%d", timeArray) program = os.path.basename('program') logger = logging.getLogger(program) if not os.path.exists(log_filename): os.makedirs(log_filename) logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w') logging.root.setLevel(level=logging.INFO) logger.info("running %s" % ' '.join(sys.argv)) from data_helper import log_time_delta,getLogger logger=getLogger() args = Singleton().get_qcnn_flag() args._parse_flags() opts=dict() logger.info("\nParameters:") for attr, value in sorted(args.__flags.items()): logger.info(("{}={}".format(attr.upper(), value))) opts[attr]=value train,test,dev = data_helper.load(args.data,filter = args.clean) q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split())) a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split())) alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data ) logger.info('the number of words :%d '%len(alphabet)) if args.data=="quora" or args.data=="8008" : print("cn embedding") embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data ) train_data_loader = data_helper.getBatch48008 else: embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data ) train_data_loader = data_helper.get_mini_batch opts["embeddings"] =embedding opts["vocab_size"]=len(alphabet) opts["max_input_right"]=a_max_sent_length opts["max_input_left"]=q_max_sent_length opts["filter_sizes"]=list(map(int, args.filter_sizes.split(","))) print("innitilize over") #with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)): with tf.Graph().as_default(): # with tf.device("/cpu:0"): session_conf = tf.ConfigProto() session_conf.allow_soft_placement = args.allow_soft_placement session_conf.log_device_placement = args.log_device_placement session_conf.gpu_options.allow_growth = True sess = tf.Session(config=session_conf) model=models.setup(opts) model.build_graph() saver = tf.train.Saver() # ckpt = tf.train.get_checkpoint_state("checkpoint") # if ckpt and ckpt.model_checkpoint_path: # # Restores from checkpoint # saver.restore(sess, ckpt.model_checkpoint_path) # if os.path.exists("model") : # import shutil # shutil.rmtree("model") # builder = tf.saved_model.builder.SavedModelBuilder("./model") # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING]) # builder.save(True) # variable_averages = tf.train.ExponentialMovingAverage( model) # variables_to_restore = variable_averages.variables_to_restore() # saver = tf.train.Saver(variables_to_restore) # for name in variables_to_restore: # print(name) sess.run(tf.global_variables_initializer()) @log_time_delta def predict(model,sess,batch,test): scores = [] for data in batch: score = model.predict(sess,data) scores.extend(score) return np.array(scores[:len(test)]) best_p1=0 for i in range(args.num_epoches): for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess): # for data in data_helper.getBatch48008(train,alphabet,args.batch_size): _, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data) time_str = 
datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) #<<<<<<< HEAD # # # if i>0 and i % 5 ==0: # test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) # # predicted_test = predict(model,sess,test_datas,test) # map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) # # logger.info('map_mrr test' +str(map_mrr_test)) # print('map_mrr test' +str(map_mrr_test)) # # test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size) # predicted_test = predict(model,sess,test_datas,dev) # map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test) # # logger.info('map_mrr dev' +str(map_mrr_test)) # print('map_mrr dev' +str(map_mrr_test)) # map,mrr,p1 = map_mrr_test # if p1>best_p1: # best_p1=p1 # filename= "checkpoint/"+args.data+"_"+str(p1)+".model" # save_path = saver.save(sess, filename) # # load_path = saver.restore(sess, model_path) # # import shutil # shutil.rmtree("model") # builder = tf.saved_model.builder.SavedModelBuilder("./model") # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING]) # builder.save(True) # # #======= test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) predicted_test = predict(model,sess,test_datas,test) map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) logger.info('map_mrr test' +str(map_mrr_test)) print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
/test.py
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper

import datetime
import os

import models
import numpy as np
import evaluation

from data_helper import log_time_delta, getLogger

logger = getLogger()

args = Singleton().get_rnn_flag()
# args = Singleton().get_8008_flag()
args._parse_flags()

opts = dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
    logger.info(("{}={}".format(attr.upper(), value)))
    opts[attr] = value

train, test, dev = data_helper.load(args.data, filter=args.clean)

q_max_sent_length = max(map(lambda x: len(x), train['question'].str.split()))
a_max_sent_length = max(map(lambda x: len(x), train['answer'].str.split()))

alphabet = data_helper.get_alphabet([train, test, dev], dataset=args.data)
logger.info('the number of words :%d ' % len(alphabet))

if args.data == "quora" or args.data == "8008":
    print("cn embedding")
    embedding = data_helper.get_embedding(alphabet, dim=200, language="cn", dataset=args.data)
    train_data_loader = data_helper.getBatch48008
else:
    embedding = data_helper.get_embedding(alphabet, dim=300, dataset=args.data)
    train_data_loader = data_helper.get_mini_batch

opts["embeddings"] = embedding
opts["vocab_size"] = len(alphabet)
opts["max_input_right"] = a_max_sent_length
opts["max_input_left"] = q_max_sent_length
opts["filter_sizes"] = list(map(int, args.filter_sizes.split(",")))

print("initialize over")

# with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
    # with tf.device("/cpu:0"):
    session_conf = tf.ConfigProto()
    session_conf.allow_soft_placement = args.allow_soft_placement
    session_conf.log_device_placement = args.log_device_placement
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)

    model = models.setup(opts)
    model.build_graph()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # run first, then print or save

    ckpt = tf.train.get_checkpoint_state("checkpoint")
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(sess.run(model.position_embedding)[0])

    if os.path.exists("model"):
        import shutil
        shutil.rmtree("model")
    builder = tf.saved_model.builder.SavedModelBuilder("./model")
    builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
    builder.save(True)

    variable_averages = tf.train.ExponentialMovingAverage(model)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    for name in variables_to_restore:
        print(name)

    @log_time_delta
    def predict(model, sess, batch, test):
        scores = []
        for data in batch:
            score = model.predict(sess, data)
            scores.extend(score)
        return np.array(scores[:len(test)])

    text = "怎么 提取 公积金 ?"

    splited_text = data_helper.encode_to_split(text, alphabet)

    mb_q, mb_q_mask = data_helper.prepare_data([splited_text])
    mb_a, mb_a_mask = data_helper.prepare_data([splited_text])

    data = (mb_q, mb_a, mb_q_mask, mb_a_mask)

    score = model.predict(sess, data)
    print(score)

    feed_dict = {
        model.question: data[0],
        model.answer: data[1],
        model.q_mask: data[2],
        model.a_mask: data[3],
        model.dropout_keep_prob_holder: 1.0
    }
    sess.run(model.position_embedding, feed_dict=feed_dict)[0]
Sssssbo/SDCNet
refs/heads/master
"{\"/SDCNet.py\": [\"/model.py\", \"/datasets.py\", \"/misc.py\"], \"/infer_SDCNet.py\": [\"/model.p(...TRUNCATED)
"└── \n ├── SDCNet.py\n ├── count_dataset.py\n ├── create_free.py\n(...TRUNCATED)
/SDCNet.py
"import datetime\nimport os\nimport time\n\nimport torch\nfrom torch import nn\nfrom torch import op(...TRUNCATED)
/count_dataset.py
"import numpy as np\nimport os\n\nimport torch\nfrom PIL import Image\nfrom torch.autograd import Va(...TRUNCATED)
/create_free.py
"import numpy as np\nimport os\n\nimport torch\nfrom PIL import Image\nfrom torch.autograd import Va(...TRUNCATED)
/datasets.py
"import os\nimport os.path\n\nimport torch.utils.data as data\nfrom PIL import Image\n\n\nclass Imag(...TRUNCATED)
/infer_SDCNet.py
"import numpy as np\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Ima(...TRUNCATED)
/misc.py
"import numpy as np\nimport os\nimport pylab as pl\n\n#import pydensecrf.densecrf as dcrf\n\n\nclass(...TRUNCATED)
/model.py
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom resnext import ResNeXt1(...TRUNCATED)
/model/backbones/resnet.py
"import math\n\nimport torch\nfrom torch import nn\n\n\ndef conv3x3(in_planes, out_planes, stride=1)(...TRUNCATED)
/model/make_model.py
"import torch\nimport torch.nn as nn\nfrom .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet(...TRUNCATED)
/resnet/__init__.py
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
/resnet/config.py
resnet50_path = './resnet/resnet50-19c8e357.pth'
/resnet/make_model.py
"from .resnet import ResNet, BasicBlock, Bottleneck\nimport torch\nfrom torch import nn\nfrom .confi(...TRUNCATED)
/resnext/__init__.py
from .resnext101 import ResNeXt101
riadghorra/whiteboard-oop-project
refs/heads/master
"{\"/src/tools.py\": [\"/src/figures.py\"], \"/src/white_board.py\": [\"/src/figures.py\", \"/src/to(...TRUNCATED)
"└── \n └── src\n ├── client.py\n ├── figures.py\n (...TRUNCATED)
/src/client.py
"import socket\nimport json\nimport sys\nimport math\nfrom white_board import WhiteBoard, binary_to_(...TRUNCATED)
/src/figures.py
"\"\"\"\nModule contenant toutes les figures et opérations de base\n\"\"\"\n\nimport pygame\nimport(...TRUNCATED)
/src/main.py
"from white_board import WhiteBoard\nimport json\n\n'''\nThis file is used to run locally or to debu(...TRUNCATED)
/src/serveur.py
"import socket\nimport sys\nimport time\nfrom threading import Thread\nimport json\n\n'''\nLes deux (...TRUNCATED)
/src/tools.py
"\"\"\"\nModule contenant les differents outils de gestion du tableau\n\"\"\"\nimport pygame\nimport(...TRUNCATED)
/src/white_board.py
"import pygame\nimport pygame.draw\nimport json\nimport sys\nfrom functools import reduce\nimport op(...TRUNCATED)
pyfaddist/yafcorse
refs/heads/main
"{\"/tests/conftest.py\": [\"/src/yafcorse/__init__.py\"], \"/tests/test_ceate_extensions.py\": [\"/(...TRUNCATED)
"└── \n ├── src\n │ └── yafcorse\n │ └── __init__.py\n (...TRUNCATED)
/src/yafcorse/__init__.py
"import re\nfrom typing import Callable, Iterable\nfrom flask import Flask, Response, request\n\n# Y(...TRUNCATED)
/tests/conftest.py
"import pytest\nfrom flask import Flask\n\nfrom yafcorse import Yafcorse\n\n\[email protected]()\ndef(...TRUNCATED)
/tests/test_ceate_extensions.py
"from flask.app import Flask\n\nfrom yafcorse import Yafcorse\n\n\ndef test_extension(app: Flask):\n(...TRUNCATED)
/tests/test_default_configuration.py
# def test_no_cors_enabled():
#     assert False
/tests/test_origins_function.py
"import pytest\nfrom flask import Flask, Response\nfrom flask.testing import FlaskClient\n\nfrom yaf(...TRUNCATED)
/tests/test_preflight_request.py
"from flask import Response\nfrom flask.testing import FlaskClient\n\n\n# def test_with_origin(clien(...TRUNCATED)
/tests/test_simple_request.py
"from flask import Flask, Response\nfrom flask.testing import FlaskClient\n\n\ndef test_simple_reque(...TRUNCATED)
ericfourrier/auto-clean
refs/heads/develop
"{\"/test.py\": [\"/autoc/utils/helpers.py\", \"/autoc/outliersdetection.py\", \"/autoc/explorer.py\(...TRUNCATED)
"└── \n ├── autoc\n │ ├── __init__.py\n │ ├── exceptions.py(...TRUNCATED)
/autoc/__init__.py
"__all__ = [\"explorer\", \"naimputer\"]\nfrom .explorer import DataExploration\nfrom .naimputer imp(...TRUNCATED)
/autoc/exceptions.py
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: efourrier\n\nPurpose : File with a(...TRUNCATED)
/autoc/explorer.py
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: efourrier\n\nPurpose : This is a f(...TRUNCATED)
/autoc/naimputer.py
"from autoc.explorer import DataExploration, pd\nfrom autoc.utils.helpers import cserie\nimport seab(...TRUNCATED)
/autoc/outliersdetection.py
"\"\"\"\n@author: efourrier\n\nPurpose : This is a simple experimental class to detect outliers. Thi(...TRUNCATED)
/autoc/preprocess.py
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: efourrier\n\nPurpose : The purpose(...TRUNCATED)
/autoc/utils/corrplot.py
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef plot_corrmatrix(df, square=True, li(...TRUNCATED)
/autoc/utils/getdata.py
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: efourrier\n\nPurpose : Get data fr(...TRUNCATED)
/autoc/utils/helpers.py
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: efourrier\n\nPurpose : Create toolbox functions to use fo(...TRUNCATED)
/setup.py
"from setuptools import setup, find_packages\n\n\ndef readme():\n with open('README.md') as f:\n (...TRUNCATED)
/test.py
"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: efourrier\n\nPurpose : Automated test suites with unitt(...TRUNCATED)
thinkAmi-sandbox/AWS_CDK-sample
refs/heads/master
{"/step_functions/app.py": ["/step_functions/step_functions/step_functions_stack.py"]}
"└── \n └── step_functions\n ├── app.py\n └── step_function(...TRUNCATED)
/step_functions/app.py
"#!/usr/bin/env python3\n\nfrom aws_cdk import core\n\nfrom step_functions.step_functions_stack impo(...TRUNCATED)
/step_functions/step_functions/lambda_function/error/lambda_function.py
"import json\n\n\ndef lambda_handler(event, context):\n # {\n # \"resource\": \"arn:aws:lamb(...TRUNCATED)
/step_functions/step_functions/lambda_function/first/lambda_function.py
"import os\nimport boto3\nfrom numpy.random import rand\n\n\ndef lambda_handler(event, context):\n (...TRUNCATED)
/step_functions/step_functions/lambda_function/second/lambda_function.py
"def lambda_handler(event, context):\n if event['parallel_no'] % 2 == 0:\n raise Exception(...TRUNCATED)
/step_functions/step_functions/lambda_function/third/lambda_function.py
"def lambda_handler(event, context):\n if event['parallel_no'] == 1:\n raise Exception('(...TRUNCATED)
/step_functions/step_functions/settings.example.py
AWS_SCIPY_ARN = 'arn:aws:lambda:region:account_id:layer:AWSLambda-Python37-SciPy1x:2'
/step_functions/step_functions/step_functions_stack.py
"import pathlib\n\nfrom aws_cdk import core\nfrom aws_cdk.aws_iam import PolicyStatement, Effect, Ma(...TRUNCATED)
greenmato/slackline-spots
refs/heads/master
"{\"/spots-api/map/urls.py\": [\"/spots-api/map/api.py\", \"/spots-api/map/views.py\"], \"/spots-api(...TRUNCATED)
"└── \n └── spots-api\n └── map\n ├── api.py\n (...TRUNCATED)
/spots-api/map/api.py
"from abc import ABC, ABCMeta, abstractmethod\nfrom django.forms.models import model_to_dict\nfrom d(...TRUNCATED)
/spots-api/map/forms.py
"from django import forms\nfrom django.forms import ModelForm, Textarea\n\nfrom map.models import Sp(...TRUNCATED)
/spots-api/map/migrations/0001_initial.py
"# Generated by Django 2.0 on 2017-12-17 18:04\n\nfrom django.db import migrations, models\n\n\nclas(...TRUNCATED)
/spots-api/map/migrations/0005_auto_20180305_2131.py
"# Generated by Django 2.0.1 on 2018-03-05 21:31\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/spots-api/map/migrations/0007_auto_20180305_2139.py
"# Generated by Django 2.0.1 on 2018-03-05 21:39\n\nfrom django.db import migrations\n\n\nclass Migr(...TRUNCATED)
/spots-api/map/migrations/0008_auto_20180305_2211.py
"# Generated by Django 2.0.1 on 2018-03-05 22:11\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/spots-api/map/migrations/0009_auto_20180305_2215.py
"# Generated by Django 2.0.1 on 2018-03-05 22:15\n\nfrom django.db import migrations, models\nimport(...TRUNCATED)
/spots-api/map/migrations/0010_auto_20180306_2119.py
"# Generated by Django 2.0.1 on 2018-03-06 21:19\n\nfrom django.db import migrations\n\n\nclass Migr(...TRUNCATED)
/spots-api/map/models.py
"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidat(...TRUNCATED)
/spots-api/map/urls.py
"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import(...TRUNCATED)
/spots-api/map/views.py
"from django.shortcuts import render\nfrom django.views import View\n\nclass MapView(View):\n def(...TRUNCATED)
katrii/ohsiha
refs/heads/master
{"/ohjelma/views.py": ["/ohjelma/models.py"]}
"└── \n └── ohjelma\n ├── apps.py\n ├── migrations\n (...TRUNCATED)
/ohjelma/apps.py
from django.apps import AppConfig


class OhjelmaConfig(AppConfig):
    name = 'ohjelma'
/ohjelma/migrations/0002_song.py
"# Generated by Django 3.0.2 on 2020-03-13 17:36\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0003_song_release_year.py
"# Generated by Django 3.0.2 on 2020-03-15 16:01\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0004_track.py
"# Generated by Django 3.0.2 on 2020-03-28 23:19\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0005_auto_20200329_1313.py
"# Generated by Django 3.0.2 on 2020-03-29 10:13\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0006_auto_20200329_1329.py
"# Generated by Django 3.0.2 on 2020-03-29 10:29\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0007_track_track_id.py
"# Generated by Django 3.0.2 on 2020-04-11 18:42\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/migrations/0009_auto_20200411_2211.py
"# Generated by Django 3.0.2 on 2020-04-11 19:11\n\nfrom django.db import migrations, models\n\n\ncl(...TRUNCATED)
/ohjelma/models.py
"from django.db import models\nfrom django.urls import reverse\n\nclass Question(models.Model):\n (...TRUNCATED)
/ohjelma/urls.py
"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n path('', views.index, n(...TRUNCATED)
/ohjelma/views.py
"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generi(...TRUNCATED)
lukasld/Flask-Video-Editor
refs/heads/main
"{\"/app/api/VideoProcessing.py\": [\"/app/api/decorators.py\", \"/app/api/errors.py\"], \"/app/api/(...TRUNCATED)
"└── \n ├── app\n │ ├── __init__.py\n │ ├── api\n │ (...TRUNCATED)
/app/__init__.py
"from flask import Flask\nfrom config import config\nfrom flask_caching import Cache\n\nfrom flask_s(...TRUNCATED)
/app/api/VideoProcessing.py
"from werkzeug.utils import secure_filename\nfrom functools import partial\nimport subprocess as sp\(...TRUNCATED)
/app/api/__init__.py
"from flask import Blueprint\n\napi = Blueprint('videoApi', __name__)\n\nfrom . import videoApi, err(...TRUNCATED)
/app/api/decorators.py
"from flask import request, jsonify\nfrom functools import wraps\n\nfrom .errors import InvalidAPIUs(...TRUNCATED)
/app/api/errors.py
"import sys\nimport traceback\nfrom flask import jsonify, request\n\nfrom . import api\n\nclass Inva(...TRUNCATED)
/app/api/help.py
"from flask import jsonify, request, send_from_directory\nfrom . decorators import parameter_check\n(...TRUNCATED)
/app/api/utils.py
"import cv2\nimport math\nimport string\nimport random\nimport numpy as np\nimport skvideo.io\nfrom (...TRUNCATED)
/app/api/videoApi.py
"import os\nfrom flask import Flask, request, redirect, \\\n url_for, session, json(...TRUNCATED)
/app/docs/__init__.py
"from flask_swagger_ui import get_swaggerui_blueprint\n\n\nswagger_ui = get_swaggerui_blueprint(\n (...TRUNCATED)
/app/main/errors.py
"from flask import redirect, url_for, jsonify\nfrom . import main\n\[email protected]_errorhandler(404)\nde(...TRUNCATED)
/config.py
"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config:\n\n \"\"\"\n (...TRUNCATED)
junprog/contrastive-baseline
refs/heads/main
"{\"/linear_eval.py\": [\"/models/create_linear_eval_model.py\", \"/utils/visualizer.py\", \"/datase(...TRUNCATED)
"└── \n ├── datasets\n │ ├── cifar10.py\n │ └── spatial.py\(...TRUNCATED)
/datasets/cifar10.py
"from typing import Callable, Optional\nimport random\n\nfrom PIL import Image\nimport numpy as np\n(...TRUNCATED)
/datasets/spatial.py
"# in : original image\n# out : cropped img1 (anchor)\n# cropped img2 (compete)\n# targ(...TRUNCATED)
/exp.py
"import torch\nimport torchvision\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimpo(...TRUNCATED)
/linear_eval.py
"import os\nimport argparse\nimport logging\nimport numpy as np\n\nimport torch\nimport torch.nn as (...TRUNCATED)
/models/cosine_contrastive_loss.py
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F \n\n\ndef D(p, z, version='sim(...TRUNCATED)
/models/create_linear_eval_model.py
"import os\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch(...TRUNCATED)
/models/l2_contrastive_loss.py
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass L2ContrastiveLoss(nn.(...TRUNCATED)
/models/siamese_net.py
"import torch\nimport torch.nn as nn\n\nclass SiameseNetwork(nn.Module):\n def __init__(self, mod(...TRUNCATED)
/models/simple_siamese_net_tmp.py
"import torch\nimport torch.nn as nn\n\nclass projection_MLP(nn.Module):\n def __init__(self, in_(...TRUNCATED)
/train.py
"from utils.contrastive_trainer import CoTrainer\nfrom utils.simsiam_trainer import SimSiamTrainer\n(...TRUNCATED)
/train_val_split.py
"import os\nfrom glob import glob\n\nimport numpy as np\nimport argparse\n\ndef parse_args():\n p(...TRUNCATED)
/utils/contrastive_trainer.py
"import os\nimport sys\nimport time\nimport logging\n\nimport numpy as np\n\nimport torch\nfrom torc(...TRUNCATED)
/utils/helper.py
"import os\nimport numpy as np\nimport torch\n\ndef worker_init_fn(worker_id): (...TRUNCATED)
/utils/simsiam_trainer.py
"import os\nimport sys\nimport time\nimport logging\n\nimport numpy as np\n\nimport torch\nfrom torc(...TRUNCATED)
/utils/visualizer.py
"import os\nimport numpy as np\nfrom PIL import Image\n\nimport torch\n\nimport matplotlib\nmatplotl(...TRUNCATED)