Dataset schema (one row per repository snapshot):

column              type            min         max
------              ----            ---         ---
repo_name           stringlengths   5           114
repo_url            stringlengths   24          133
snapshot_id         stringlengths   40          40
revision_id         stringlengths   40          40
directory_id        stringlengths   40          40
branch_name         stringclasses   209 values
visit_date          unknown
revision_date       unknown
committer_date      unknown
github_id           int64           9.83k       683M
star_events_count   int64           0           22.6k
fork_events_count   int64           0           4.15k
gha_license_id      stringclasses   17 values
gha_created_at      unknown
gha_updated_at      unknown
gha_pushed_at       unknown
gha_language        stringclasses   115 values
files               listlengths     1           13.2k
num_files           int64           1           13.2k
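For readability, the sketch below restates the row layout as Python dataclasses. It is only an illustration inferred from the column table and the sample row that follows: the `FileEntry` field names and the string typing of the date-like columns listed as "unknown" are assumptions, not part of the published schema.

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class FileEntry:
    """One element of the `files` list; keys mirror the sample row below (assumed)."""
    path: str                      # e.g. "/main.py"
    language: Optional[str]        # e.g. "Python"
    length_bytes: int              # size of the file content in bytes
    text: str                      # full source text of the file
    extra: Dict[str, Any] = field(default_factory=dict)  # alpha_fraction, blob_id, ...


@dataclass
class RepoSnapshot:
    """One dataset row, following the column table above."""
    repo_name: str                 # 5-114 chars, e.g. "shuishen112/pairwise-rnn"
    repo_url: str                  # 24-133 chars
    snapshot_id: str               # 40-char hex id
    revision_id: str               # 40-char hex id
    directory_id: str              # 40-char hex id
    branch_name: str               # one of 209 classes, e.g. "refs/heads/master"
    visit_date: Optional[str]      # dtype "unknown"; ISO timestamp in the sample
    revision_date: Optional[str]
    committer_date: Optional[str]
    github_id: int                 # int64, roughly 9.83k-683M
    star_events_count: int         # 0-22.6k
    fork_events_count: int         # 0-4.15k
    gha_license_id: Optional[str]  # one of 17 classes, null in the sample
    gha_created_at: Optional[str]
    gha_updated_at: Optional[str]
    gha_pushed_at: Optional[str]
    gha_language: Optional[str]    # one of 115 classes
    files: List[FileEntry]         # 1-13.2k entries, one per source file
    num_files: int                 # matches len(files); 8 in the sample row
```

In the first sample row below, `files` holds eight such entries, one per Python source file in the repository, and `num_files` is 8 accordingly.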
Sample row 1:

repo_name:          shuishen112/pairwise-rnn
repo_url:           https://github.com/shuishen112/pairwise-rnn
snapshot_id:        2e8d800ef711ed95a3e75a18cac98dc4fb75fe5d
revision_id:        f290baf3afde5e67c5ad0b21244d19cbc14869df
directory_id:       7d88427c34ee31e4d71bc609006d3dfdca92fb39
branch_name:        refs/heads/master
visit_date:         "2021-09-04T17:22:29.355000"
revision_date:      "2018-01-20T10:00:56"
committer_date:     "2018-01-20T10:00:56"
github_id:          106633864
star_events_count:  5
fork_events_count:  0
gha_license_id:     null
gha_created_at:     null
gha_updated_at:     null
gha_pushed_at:      null
gha_language:       null
files:
[ { "alpha_fraction": 0.5468424558639526, "alphanum_fraction": 0.5530881285667419, "avg_line_length": 36.0431022644043, "blob_id": "d7390897bbca1966acc343e8cfd429cb583223c9", "content_id": "acc18bd9283ded728d720c858d6162cd19e5f973", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4323, "license_type": "no_license", "max_line_length": 159, "num_lines": 116, "path": "/main.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "\nimport data_helper\nimport time\nimport datetime\nimport os\nimport tensorflow as tf\n\nimport numpy as np\nimport evaluation\nnow = int(time.time()) \n \ntimeArray = time.localtime(now)\ntimeStamp = time.strftime(\"%Y%m%d%H%M%S\", timeArray)\ntimeDay = time.strftime(\"%Y%m%d\", timeArray)\nprint (timeStamp)\n\ndef main(args):\n args._parse_flags()\n print(\"\\nParameters:\")\n for attr, value in sorted(args.__flags.items()):\n print((\"{}={}\".format(attr.upper(), value)))\n log_dir = 'log/'+ timeDay\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n data_file = log_dir + '/test_' + args.data + timeStamp\n precision = data_file + 'precise'\n print('load data ...........')\n train,test,dev = data_helper.load(args.data,filter = args.clean)\n\n q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))\n a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))\n\n alphabet = data_helper.get_alphabet([train,test,dev])\n print('the number of words',len(alphabet))\n\n print('get embedding')\n if args.data==\"quora\":\n embedding = data_helper.get_embedding(alphabet,language=\"cn\")\n else:\n embedding = data_helper.get_embedding(alphabet)\n \n \n\n with tf.Graph().as_default(), tf.device(\"/gpu:\" + str(args.gpu)):\n # with tf.device(\"/cpu:0\"):\n session_conf = tf.ConfigProto()\n session_conf.allow_soft_placement = args.allow_soft_placement\n session_conf.log_device_placement = args.log_device_placement\n session_conf.gpu_options.allow_growth = True\n sess = tf.Session(config=session_conf)\n\n model = QA_CNN_extend(max_input_left = q_max_sent_length,\n max_input_right = a_max_sent_length,\n batch_size = args.batch_size,\n vocab_size = len(alphabet),\n embedding_size = args.embedding_dim,\n filter_sizes = list(map(int, args.filter_sizes.split(\",\"))),\n num_filters = args.num_filters, \n hidden_size = args.hidden_size,\n dropout_keep_prob = args.dropout_keep_prob,\n embeddings = embedding,\n l2_reg_lambda = args.l2_reg_lambda,\n trainable = args.trainable,\n pooling = args.pooling,\n conv = args.conv)\n\n model.build_graph()\n\n sess.run(tf.global_variables_initializer())\n def train_step(model,sess,batch):\n for data in batch:\n feed_dict = {\n model.question:data[0],\n model.answer:data[1],\n model.answer_negative:data[2],\n model.q_mask:data[3],\n model.a_mask:data[4],\n model.a_neg_mask:data[5]\n\n }\n _, summary, step, loss, accuracy,score12, score13, see = sess.run(\n [model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}\".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))\n def predict(model,sess,batch,test):\n scores = []\n for data in batch:\n feed_dict = {\n model.question:data[0],\n model.answer:data[1],\n model.q_mask:data[2],\n model.a_mask:data[3]\n\n }\n score = sess.run(\n model.score12,\n feed_dict)\n 
scores.extend(score)\n \n return np.array(scores[:len(test)])\n \n \n \n\n \n for i in range(args.num_epoches):\n datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)\n train_step(model,sess,datas)\n test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)\n\n predicted_test = predict(model,sess,test_datas,test)\n print(len(predicted_test))\n print(len(test))\n map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)\n\n print('map_mrr test',map_mrr_test)\n\n\n\n\n\n \n\n\n\n" }, { "alpha_fraction": 0.597571074962616, "alphanum_fraction": 0.6273959279060364, "avg_line_length": 60.426395416259766, "blob_id": "1ab240f2c43cb1b3766e62d98797b241e95b6646", "content_id": "e12a44fef7ed15292df9d604acd576ed55cb8bc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12104, "license_type": "no_license", "max_line_length": 121, "num_lines": 197, "path": "/config.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "class Singleton(object):\n __instance=None\n def __init__(self):\n pass\n def getInstance(self):\n if Singleton.__instance is None:\n # Singleton.__instance=object.__new__(cls,*args,**kwd)\n Singleton.__instance=self.get_test_flag()\n print(\"build FLAGS over\")\n return Singleton.__instance\n def get_test_flag(self):\n import tensorflow as tf\n flags = tf.app.flags\n if len(flags.FLAGS.__dict__.keys())<=2:\n\n flags.DEFINE_integer(\"embedding_size\",300, \"Dimensionality of character embedding (default: 128)\")\n flags.DEFINE_string(\"filter_sizes\", \"1,2,3,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n flags.DEFINE_integer(\"num_filters\", 64, \"Number of filters per filter size (default: 128)\")\n flags.DEFINE_float(\"dropout_keep_prob\", 1, \"Dropout keep probability (default: 0.5)\")\n flags.DEFINE_float(\"l2_reg_lambda\", 0.000001, \"L2 regularizaion lambda (default: 0.0)\")\n flags.DEFINE_float(\"learning_rate\", 5e-3, \"learn rate( default: 0.0)\")\n flags.DEFINE_integer(\"max_len_left\", 40, \"max document length of left input\")\n flags.DEFINE_integer(\"max_len_right\", 40, \"max document length of right input\")\n flags.DEFINE_string(\"loss\",\"pair_wise\",\"loss function (default:point_wise)\")\n flags.DEFINE_integer(\"hidden_size\",100,\"the default hidden size\")\n flags.DEFINE_string(\"model_name\", \"cnn\", \"cnn or rnn\")\n\n # Training parameters\n flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n flags.DEFINE_boolean(\"trainable\", False, \"is embedding trainable? 
(default: False)\")\n flags.DEFINE_integer(\"num_epoches\", 1000, \"Number of training epochs (default: 200)\")\n flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps (default: 100)\")\n flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 100)\")\n\n flags.DEFINE_string('data','wiki','data set')\n flags.DEFINE_string('pooling','max','max pooling or attentive pooling')\n flags.DEFINE_boolean('clean',True,'whether we clean the data')\n flags.DEFINE_string('conv','wide','wide conv or narrow')\n flags.DEFINE_integer('gpu',0,'gpu number')\n # Misc Parameters\n flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n return flags.FLAGS\n def get_rnn_flag(self):\n import tensorflow as tf\n flags = tf.app.flags\n if len(flags.FLAGS.__dict__.keys())<=2:\n\n flags.DEFINE_integer(\"embedding_size\",300, \"Dimensionality of character embedding (default: 128)\")\n flags.DEFINE_string(\"filter_sizes\", \"1,2,3,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n flags.DEFINE_integer(\"num_filters\", 64, \"Number of filters per filter size (default: 128)\")\n flags.DEFINE_float(\"dropout_keep_prob\", 1, \"Dropout keep probability (default: 0.5)\")\n flags.DEFINE_float(\"l2_reg_lambda\", 0.000001, \"L2 regularizaion lambda (default: 0.0)\")\n flags.DEFINE_float(\"learning_rate\", 0.001, \"learn rate( default: 0.0)\")\n flags.DEFINE_integer(\"max_len_left\", 40, \"max document length of left input\")\n flags.DEFINE_integer(\"max_len_right\", 40, \"max document length of right input\")\n flags.DEFINE_string(\"loss\",\"pair_wise\",\"loss function (default:point_wise)\")\n flags.DEFINE_integer(\"hidden_size\",100,\"the default hidden size\")\n flags.DEFINE_string(\"model_name\", \"rnn\", \"cnn or rnn\")\n\n # Training parameters\n flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n flags.DEFINE_boolean(\"trainable\", False, \"is embedding trainable? 
(default: False)\")\n flags.DEFINE_integer(\"num_epoches\", 1000, \"Number of training epochs (default: 200)\")\n flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps (default: 100)\")\n flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 100)\")\n\n\n# flags.DEFINE_string('data','8008','data set')\n\n flags.DEFINE_string('data','trec','data set')\n\n flags.DEFINE_string('pooling','max','max pooling or attentive pooling')\n flags.DEFINE_boolean('clean',False,'whether we clean the data')\n flags.DEFINE_string('conv','wide','wide conv or narrow')\n flags.DEFINE_integer('gpu',0,'gpu number')\n # Misc Parameters\n flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n return flags.FLAGS\n def get_cnn_flag(self):\n import tensorflow as tf\n flags = tf.app.flags\n if len(flags.FLAGS.__dict__.keys())<=2:\n\n flags.DEFINE_integer(\"embedding_size\",300, \"Dimensionality of character embedding (default: 128)\")\n flags.DEFINE_string(\"filter_sizes\", \"1,2,3,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n flags.DEFINE_integer(\"num_filters\", 64, \"Number of filters per filter size (default: 128)\")\n flags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"Dropout keep probability (default: 0.5)\")\n flags.DEFINE_float(\"l2_reg_lambda\", 0.000001, \"L2 regularizaion lambda (default: 0.0)\")\n flags.DEFINE_float(\"learning_rate\", 5e-3, \"learn rate( default: 0.0)\")\n flags.DEFINE_integer(\"max_len_left\", 40, \"max document length of left input\")\n flags.DEFINE_integer(\"max_len_right\", 40, \"max document length of right input\")\n flags.DEFINE_string(\"loss\",\"pair_wise\",\"loss function (default:point_wise)\")\n flags.DEFINE_integer(\"hidden_size\",100,\"the default hidden size\")\n flags.DEFINE_string(\"model_name\", \"cnn\", \"cnn or rnn\")\n\n # Training parameters\n flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n flags.DEFINE_boolean(\"trainable\", False, \"is embedding trainable? 
(default: False)\")\n flags.DEFINE_integer(\"num_epoches\", 1000, \"Number of training epochs (default: 200)\")\n flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps (default: 100)\")\n flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 100)\")\n\n flags.DEFINE_string('data','wiki','data set')\n flags.DEFINE_string('pooling','max','max pooling or attentive pooling')\n flags.DEFINE_boolean('clean',True,'whether we clean the data')\n flags.DEFINE_string('conv','wide','wide conv or narrow')\n flags.DEFINE_integer('gpu',0,'gpu number')\n # Misc Parameters\n flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n return flags.FLAGS\n\n\n def get_qcnn_flag(self):\n\n import tensorflow as tf\n flags = tf.app.flags\n if len(flags.FLAGS.__dict__.keys())<=2:\n\n\n flags.DEFINE_integer(\"embedding_size\",300, \"Dimensionality of character embedding (default: 128)\")\n flags.DEFINE_string(\"filter_sizes\", \"1,2,3,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n flags.DEFINE_integer(\"num_filters\", 128, \"Number of filters per filter size (default: 128)\")\n flags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"Dropout keep probability (default: 0.5)\")\n flags.DEFINE_float(\"l2_reg_lambda\", 0.000001, \"L2 regularizaion lambda (default: 0.0)\")\n flags.DEFINE_float(\"learning_rate\", 0.001, \"learn rate( default: 0.0)\")\n\n flags.DEFINE_integer(\"max_len_left\", 40, \"max document length of left input\")\n flags.DEFINE_integer(\"max_len_right\", 40, \"max document length of right input\")\n flags.DEFINE_string(\"loss\",\"pair_wise\",\"loss function (default:point_wise)\")\n flags.DEFINE_integer(\"hidden_size\",100,\"the default hidden size\")\n\n flags.DEFINE_string(\"model_name\", \"qcnn\", \"cnn or rnn\")\n\n\n # Training parameters\n flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n flags.DEFINE_boolean(\"trainable\", False, \"is embedding trainable? 
(default: False)\")\n flags.DEFINE_integer(\"num_epoches\", 1000, \"Number of training epochs (default: 200)\")\n flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps (default: 100)\")\n flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 100)\")\n\n\n flags.DEFINE_string('data','wiki','data set')\n flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')\n\n flags.DEFINE_boolean('clean',True,'whether we clean the data')\n flags.DEFINE_string('conv','wide','wide conv or narrow')\n flags.DEFINE_integer('gpu',0,'gpu number')\n # Misc Parameters\n flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n return flags.FLAGS\n\n def get_8008_flag(self):\n import tensorflow as tf\n flags = tf.app.flags\n if len(flags.FLAGS.__dict__.keys())<=2:\n\n flags.DEFINE_integer(\"embedding_size\",200, \"Dimensionality of character embedding (default: 128)\")\n flags.DEFINE_string(\"filter_sizes\", \"1,2,3,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\n flags.DEFINE_integer(\"num_filters\", 64, \"Number of filters per filter size (default: 128)\")\n flags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"Dropout keep probability (default: 0.5)\")\n flags.DEFINE_float(\"l2_reg_lambda\", 0.000001, \"L2 regularizaion lambda (default: 0.0)\")\n flags.DEFINE_float(\"learning_rate\", 1e-3, \"learn rate( default: 0.0)\")\n flags.DEFINE_integer(\"max_len_left\", 40, \"max document length of left input\")\n flags.DEFINE_integer(\"max_len_right\", 40, \"max document length of right input\")\n flags.DEFINE_string(\"loss\",\"pair_wise\",\"loss function (default:point_wise)\")\n flags.DEFINE_integer(\"hidden_size\",100,\"the default hidden size\")\n flags.DEFINE_string(\"model_name\", \"rnn\", \"cnn or rnn\")\n\n # Training parameters\n flags.DEFINE_integer(\"batch_size\", 250, \"Batch Size (default: 64)\")\n flags.DEFINE_boolean(\"trainable\", False, \"is embedding trainable? 
(default: False)\")\n flags.DEFINE_integer(\"num_epoches\", 1000, \"Number of training epochs (default: 200)\")\n flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps (default: 100)\")\n flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 100)\")\n\n flags.DEFINE_string('data','8008','data set')\n flags.DEFINE_string('pooling','max','max pooling or attentive pooling')\n flags.DEFINE_boolean('clean',False,'whether we clean the data')\n flags.DEFINE_string('conv','wide','wide conv or narrow')\n flags.DEFINE_integer('gpu',0,'gpu number')\n # Misc Parameters\n flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n return flags.FLAGS\n\n\n\n\nif __name__==\"__main__\":\n args=Singleton().get_test_flag()\n for attr, value in sorted(args.__flags.items()):\n print((\"{}={}\".format(attr.upper(), value)))\n " }, { "alpha_fraction": 0.6214203238487244, "alphanum_fraction": 0.6287038326263428, "avg_line_length": 35.82926940917969, "blob_id": "665f1e0131f8d181a8d0312cb47c1be1b50d61a6", "content_id": "cc2dad6eeaa9b7aa92221834173a0033d06a6005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6041, "license_type": "no_license", "max_line_length": 161, "num_lines": 164, "path": "/run.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "from tensorflow import flags\nimport tensorflow as tf\nfrom config import Singleton\nimport data_helper\n\nimport datetime,os\n\nimport models\nimport numpy as np\nimport evaluation\n\nimport sys\nimport logging\n\nimport time\nnow = int(time.time())\ntimeArray = time.localtime(now)\ntimeStamp = time.strftime(\"%Y%m%d%H%M%S\", timeArray)\nlog_filename = \"log/\" +time.strftime(\"%Y%m%d\", timeArray)\n\nprogram = os.path.basename('program')\nlogger = logging.getLogger(program) \nif not os.path.exists(log_filename):\n os.makedirs(log_filename)\nlogging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')\nlogging.root.setLevel(level=logging.INFO)\nlogger.info(\"running %s\" % ' '.join(sys.argv))\n \n\n\nfrom data_helper import log_time_delta,getLogger\n\nlogger=getLogger()\n \n\n\n\nargs = Singleton().get_qcnn_flag()\n\nargs._parse_flags()\nopts=dict()\nlogger.info(\"\\nParameters:\")\nfor attr, value in sorted(args.__flags.items()):\n logger.info((\"{}={}\".format(attr.upper(), value)))\n opts[attr]=value\n\n\ntrain,test,dev = data_helper.load(args.data,filter = args.clean)\n\nq_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))\na_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))\n\nalphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )\nlogger.info('the number of words :%d '%len(alphabet))\n\nif args.data==\"quora\" or args.data==\"8008\" :\n print(\"cn embedding\")\n embedding = data_helper.get_embedding(alphabet,dim=200,language=\"cn\",dataset=args.data )\n train_data_loader = data_helper.getBatch48008\nelse:\n embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )\n train_data_loader = data_helper.get_mini_batch\nopts[\"embeddings\"] =embedding\nopts[\"vocab_size\"]=len(alphabet)\nopts[\"max_input_right\"]=a_max_sent_length\nopts[\"max_input_left\"]=q_max_sent_length\nopts[\"filter_sizes\"]=list(map(int, 
args.filter_sizes.split(\",\")))\n\nprint(\"innitilize over\")\n\n\n \n \n#with tf.Graph().as_default(), tf.device(\"/gpu:\" + str(args.gpu)):\nwith tf.Graph().as_default(): \n # with tf.device(\"/cpu:0\"):\n session_conf = tf.ConfigProto()\n session_conf.allow_soft_placement = args.allow_soft_placement\n session_conf.log_device_placement = args.log_device_placement\n session_conf.gpu_options.allow_growth = True\n sess = tf.Session(config=session_conf)\n model=models.setup(opts)\n model.build_graph() \n saver = tf.train.Saver()\n \n# ckpt = tf.train.get_checkpoint_state(\"checkpoint\") \n# if ckpt and ckpt.model_checkpoint_path: \n# # Restores from checkpoint \n# saver.restore(sess, ckpt.model_checkpoint_path)\n# if os.path.exists(\"model\") : \n# import shutil\n# shutil.rmtree(\"model\") \n# builder = tf.saved_model.builder.SavedModelBuilder(\"./model\")\n# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])\n# builder.save(True)\n# variable_averages = tf.train.ExponentialMovingAverage( model) \n# variables_to_restore = variable_averages.variables_to_restore() \n# saver = tf.train.Saver(variables_to_restore) \n# for name in variables_to_restore: \n# print(name) \n\n sess.run(tf.global_variables_initializer())\n @log_time_delta\n def predict(model,sess,batch,test):\n scores = []\n for data in batch: \n score = model.predict(sess,data)\n scores.extend(score) \n return np.array(scores[:len(test)])\n \n best_p1=0\n \n \n\n \n for i in range(args.num_epoches): \n \n for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):\n# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):\n _, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}\".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))\n logger.info(\"{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}\".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))\n#<<<<<<< HEAD\n# \n# \n# if i>0 and i % 5 ==0:\n# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)\n# \n# predicted_test = predict(model,sess,test_datas,test)\n# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)\n# \n# logger.info('map_mrr test' +str(map_mrr_test))\n# print('map_mrr test' +str(map_mrr_test))\n# \n# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)\n# predicted_test = predict(model,sess,test_datas,dev)\n# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)\n# \n# logger.info('map_mrr dev' +str(map_mrr_test))\n# print('map_mrr dev' +str(map_mrr_test))\n# map,mrr,p1 = map_mrr_test\n# if p1>best_p1:\n# best_p1=p1\n# filename= \"checkpoint/\"+args.data+\"_\"+str(p1)+\".model\"\n# save_path = saver.save(sess, filename) \n# # load_path = saver.restore(sess, model_path)\n# \n# import shutil\n# shutil.rmtree(\"model\")\n# builder = tf.saved_model.builder.SavedModelBuilder(\"./model\")\n# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])\n# builder.save(True)\n# \n# \n#=======\n\n test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)\n\n predicted_test = predict(model,sess,test_datas,test)\n map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)\n\n logger.info('map_mrr test' +str(map_mrr_test))\n print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))\n\n" }, { 
"alpha_fraction": 0.5751004219055176, "alphanum_fraction": 0.5921624898910522, "avg_line_length": 46.68241500854492, "blob_id": "7d47fab78aaadcce7e98cd25cf21b033637ee4a2", "content_id": "fdf7b36e89dd53e4caa49ce0050896a2bd185b18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18169, "license_type": "no_license", "max_line_length": 147, "num_lines": 381, "path": "/models/QA_CNN_pairwise.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import rnn\nimport models.blocks as blocks\n# model_type :apn or qacnn\nclass QA_CNN_extend(object):\n# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,\n# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):\n#\n# \"\"\"\n# QA_RNN model for question answering\n#\n# Args:\n# self.dropout_keep_prob: dropout rate\n# self.num_filters : number of filters\n# self.para : parameter list\n# self.extend_feature_dim : my extend feature dimension\n# self.max_input_left : the length of question\n# self.max_input_right : the length of answer\n# self.pooling : pooling strategy :max pooling or attentive pooling\n# \n# \"\"\"\n# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')\n# self.num_filters = num_filters\n# self.embeddings = embeddings\n# self.embedding_size = embedding_size\n# self.batch_size = batch_size\n# self.filter_sizes = filter_sizes\n# self.l2_reg_lambda = l2_reg_lambda\n# self.para = []\n#\n# self.max_input_left = max_input_left\n# self.max_input_right = max_input_right\n# self.trainable = trainable\n# self.vocab_size = vocab_size\n# self.pooling = pooling\n# self.total_num_filter = len(self.filter_sizes) * self.num_filters\n#\n# self.conv = conv\n# self.pooling = 'traditional'\n# self.learning_rate = learning_rate\n#\n# self.hidden_size = hidden_size\n#\n# self.attention_size = 100\n def __init__(self,opt):\n for key,value in opt.items():\n self.__setattr__(key,value)\n self.attention_size = 100\n self.pooling = 'mean'\n self.total_num_filter = len(self.filter_sizes) * self.num_filters\n self.para = []\n self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')\n def create_placeholder(self):\n print(('Create placeholders'))\n # he length of the sentence is varied according to the batch,so the None,None\n self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')\n self.max_input_left = tf.shape(self.question)[1]\n \n self.batch_size = tf.shape(self.question)[0]\n self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')\n self.max_input_right = tf.shape(self.answer)[1]\n self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')\n # self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')\n # self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')\n # self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')\n\n def add_embeddings(self):\n print( 'add embeddings')\n if self.embeddings is not None:\n print( \"load embedding\")\n W = tf.Variable(np.array(self.embeddings),name = \"W\" ,dtype=\"float32\",trainable = self.trainable)\n \n else:\n print( \"random embedding\")\n W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name=\"W\",trainable = 
self.trainable)\n self.embedding_W = W\n \n # self.overlap_W = tf.Variable(a,name=\"W\",trainable = True)\n self.para.append(self.embedding_W)\n\n self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)\n\n\n self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)\n self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)\n #real length\n self.q_len,self.q_mask = blocks.length(self.question)\n self.a_len,self.a_mask = blocks.length(self.answer)\n self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)\n\n def convolution(self):\n print( 'convolution:wide_convolution')\n self.kernels = []\n for i,filter_size in enumerate(self.filter_sizes):\n with tf.name_scope('conv-max-pool-%s' % filter_size):\n filter_shape = [filter_size,self.embedding_size,1,self.num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name=\"b\")\n self.kernels.append((W,b))\n self.para.append(W)\n self.para.append(b)\n \n embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]\n\n self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]\n\n #convolution\n def pooling_graph(self):\n if self.pooling == 'mean':\n\n self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)\n self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)\n self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)\n self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)\n elif self.pooling == 'attentive':\n self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)\n self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)\n elif self.pooling == 'position':\n self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)\n self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)\n elif self.pooling == 'traditional':\n print( self.pooling)\n print(self.q_cnn)\n self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)\n self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)\n\n def para_initial(self):\n # print((\"---------\"))\n # self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))\n self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))\n self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))\n self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))\n self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))\n self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))\n\n\n\n def mean_pooling(self,conv,mask):\n \n conv = tf.squeeze(conv,2)\n print( tf.expand_dims(tf.cast(mask,tf.float32),-1))\n # conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))\n # self.see = conv_mask\n # print( conv_mask)\n return tf.reduce_mean(conv,axis = 1);\n def 
attentive_pooling(self,input_left,input_right,q_mask,a_mask):\n\n Q = tf.squeeze(input_left,axis = 2)\n A = tf.squeeze(input_right,axis = 2)\n print( Q)\n print( A)\n # Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')\n # A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')\n # G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\\\n # A,transpose_b = True),name = 'G')\n \n first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)\n second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])\n result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))\n print( second_step)\n print( tf.transpose(A,perm = [0,2,1]))\n # print( 'result',result)\n G = tf.tanh(result)\n \n # G = result\n # column-wise pooling ,row-wise pooling\n row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')\n col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')\n \n self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')\n self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))\n self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')\n self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))\n \n self.see = G\n\n R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')\n R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')\n\n return R_q,R_a\n\n def traditional_attention(self,input_left,input_right,q_mask,a_mask):\n input_left = tf.squeeze(input_left,axis = 2)\n input_right = tf.squeeze(input_right,axis = 2) \n\n input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))\n Q = tf.reduce_mean(input_left_mask,1)\n a_shape = tf.shape(input_right)\n A = tf.reshape(input_right,[-1,self.total_num_filter])\n m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))\n f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))\n self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))\n self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))\n self.see = self.f_attention_norm\n a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)\n return Q,a_attention\n def position_attention(self,input_left,input_right,q_mask,a_mask):\n input_left = tf.squeeze(input_left,axis = 2)\n input_right = tf.squeeze(input_right,axis = 2)\n # Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')\n # A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')\n\n Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)\n\n QU = tf.matmul(Q,self.U)\n QUA = tf.multiply(tf.expand_dims(QU,1),input_right)\n self.attention_a = tf.cast(tf.argmax(QUA,2)\n ,tf.float32)\n # q_shape = tf.shape(input_left)\n # Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])\n # QU = tf.matmul(Q_1,self.U)\n # QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])\n # A_1 = tf.transpose(input_right,[0,2,1])\n # QUA = tf.matmul(QU_1,A_1)\n # QUA = tf.nn.l2_normalize(QUA,1)\n\n # G = 
tf.tanh(QUA)\n # Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)\n # # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))\n # row_pooling = tf.reduce_max(G,1,name=\"row_pooling\")\n # col_pooling = tf.reduce_max(G,2,name=\"col_pooling\")\n # self.attention_a = tf.nn.softmax(row_pooling,1,name = \"attention_a\")\n self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))\n self.see = self.attention_a\n self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))\n self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])\n return Q ,self.r_a\n def create_loss(self):\n \n with tf.name_scope('score'):\n self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)\n self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)\n l2_loss = tf.constant(0.0)\n for p in self.para:\n l2_loss += tf.nn.l2_loss(p)\n with tf.name_scope(\"loss\"):\n self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))\n self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss\n tf.summary.scalar('loss', self.loss)\n # Accuracy\n with tf.name_scope(\"accuracy\"):\n self.correct = tf.equal(0.0, self.losses)\n self.accuracy = tf.reduce_mean(tf.cast(self.correct, \"float\"), name=\"accuracy\")\n tf.summary.scalar('accuracy', self.accuracy)\n def create_op(self):\n self.global_step = tf.Variable(0, name = \"global_step\", trainable = False)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)\n\n\n def max_pooling(self,conv,input_length):\n pooled = tf.nn.max_pool(\n conv,\n ksize = [1, input_length, 1, 1],\n strides = [1, 1, 1, 1],\n padding = 'VALID',\n name=\"pool\")\n return pooled\n def getCosine(self,q,a):\n pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)\n pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)\n \n pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1)) \n pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))\n pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1) \n score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name=\"scores\") \n return score\n def wide_convolution(self,embedding):\n cnn_outputs = []\n for i,filter_size in enumerate(self.filter_sizes):\n conv = tf.nn.conv2d(\n embedding,\n self.kernels[i][0],\n strides=[1, 1, self.embedding_size, 1],\n padding='SAME',\n name=\"conv-1\"\n )\n h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name=\"relu-1\")\n cnn_outputs.append(h)\n cnn_reshaped = tf.concat(cnn_outputs,3)\n return cnn_reshaped\n \n def variable_summaries(self,var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n def build_graph(self):\n self.create_placeholder()\n self.add_embeddings()\n self.para_initial()\n self.convolution()\n self.pooling_graph()\n self.create_loss()\n 
self.create_op()\n self.merged = tf.summary.merge_all()\n\n def train(self,sess,data):\n feed_dict = {\n self.question:data[0],\n self.answer:data[1],\n self.answer_negative:data[2],\n # self.q_mask:data[3],\n # self.a_mask:data[4],\n # self.a_neg_mask:data[5],\n self.dropout_keep_prob_holder:self.dropout_keep_prob\n }\n\n _, summary, step, loss, accuracy,score12, score13, see = sess.run(\n [self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],\n feed_dict)\n return _, summary, step, loss, accuracy,score12, score13, see\n def predict(self,sess,data):\n feed_dict = {\n self.question:data[0],\n self.answer:data[1],\n # self.q_mask:data[2],\n # self.a_mask:data[3],\n self.dropout_keep_prob_holder:1.0\n } \n score = sess.run( self.score12, feed_dict) \n return score\n\n \nif __name__ == '__main__':\n \n cnn = QA_CNN_extend(\n max_input_left = 33,\n max_input_right = 40,\n batch_size = 3,\n vocab_size = 5000,\n embedding_size = 100,\n filter_sizes = [3,4,5],\n num_filters = 64, \n hidden_size = 100,\n dropout_keep_prob = 1.0,\n embeddings = None,\n l2_reg_lambda = 0.0,\n trainable = True,\n\n pooling = 'max',\n conv = 'wide')\n cnn.build_graph()\n input_x_1 = np.reshape(np.arange(3 * 33),[3,33])\n input_x_2 = np.reshape(np.arange(3 * 40),[3,40])\n input_x_3 = np.reshape(np.arange(3 * 40),[3,40])\n q_mask = np.ones((3,33))\n a_mask = np.ones((3,40))\n a_neg_mask = np.ones((3,40))\n \n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n feed_dict = {\n cnn.question:input_x_1,\n cnn.answer:input_x_2,\n # cnn.answer_negative:input_x_3,\n cnn.q_mask:q_mask,\n cnn.a_mask:a_mask,\n cnn.dropout_keep_prob_holder:cnn.dropout_keep\n # cnn.a_neg_mask:a_neg_mask\n # cnn.q_pos_overlap:q_pos_embedding,\n # cnn.q_neg_overlap:q_neg_embedding,\n # cnn.a_pos_overlap:a_pos_embedding,\n # cnn.a_neg_overlap:a_neg_embedding,\n # cnn.q_position:q_position,\n # cnn.a_pos_position:a_pos_position,\n # cnn.a_neg_position:a_neg_position\n }\n question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)\n print( question.shape,answer.shape)\n print( score)\n\n\n" }, { "alpha_fraction": 0.55800461769104, "alphanum_fraction": 0.5727543830871582, "avg_line_length": 36.712501525878906, "blob_id": "13fc5179791d9cc7f461bcaa7b8963af9678d8b9", "content_id": "1431101048ffdcb8d2b923b4f805f1a880ea0716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6034, "license_type": "no_license", "max_line_length": 116, "num_lines": 160, "path": "/models/my/nn.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "from my.general import flatten, reconstruct, add_wd, exp_mask\n\nimport numpy as np\nimport tensorflow as tf\n\n_BIAS_VARIABLE_NAME = \"bias\"\n_WEIGHTS_VARIABLE_NAME = \"kernel\"\n\n\n\ndef linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,\n is_train=None):#, name_w='', name_b=''\n # if args is None or (nest.is_sequence(args) and not args):\n # raise ValueError(\"`args` must be specified\")\n # if not nest.is_sequence(args):\n # args = [args]\n\n flat_args = [flatten(arg, 1) for arg in args]#[210,20]\n\n # if input_keep_prob < 1.0:\n # assert is_train is not None\n flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]\n \n total_arg_size = 0#[60]\n shapes = [a.get_shape() for a in flat_args]\n for shape in shapes:\n if shape.ndims != 2:\n raise ValueError(\"linear is expecting 2D 
arguments: %s\" % shapes)\n if shape[1].value is None:\n raise ValueError(\"linear expects shape[1] to be provided for shape %s, \"\n \"but saw %s\" % (shape, shape[1]))\n else:\n total_arg_size += shape[1].value\n # print(total_arg_size)\n # exit()\n dtype = [a.dtype for a in flat_args][0] \n\n # scope = tf.get_variable_scope()\n with tf.variable_scope(scope) as outer_scope:\n weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)\n if len(flat_args) == 1:\n res = tf.matmul(flat_args[0], weights)\n else: \n res = tf.matmul(tf.concat(flat_args, 1), weights)\n if not bias:\n flat_out = res\n else:\n with tf.variable_scope(outer_scope) as inner_scope:\n inner_scope.set_partitioner(None)\n biases = tf.get_variable(\n _BIAS_VARIABLE_NAME, [output_size],\n dtype=dtype,\n initializer=tf.constant_initializer(bias_start, dtype=dtype))\n flat_out = tf.nn.bias_add(res, biases) \n\n out = reconstruct(flat_out, args[0], 1)\n\n if squeeze:\n out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])\n if wd:\n add_wd(wd)\n\n return out\n\ndef softmax(logits, mask=None, scope=None):\n with tf.name_scope(scope or \"Softmax\"):\n if mask is not None:\n logits = exp_mask(logits, mask)\n flat_logits = flatten(logits, 1)\n flat_out = tf.nn.softmax(flat_logits)\n out = reconstruct(flat_out, logits, 1)\n\n return out\n\n\ndef softsel(target, logits, mask=None, scope=None):\n \"\"\"\n\n :param target: [ ..., J, d] dtype=float\n :param logits: [ ..., J], dtype=float\n :param mask: [ ..., J], dtype=bool\n :param scope:\n :return: [..., d], dtype=float\n \"\"\"\n with tf.name_scope(scope or \"Softsel\"):\n a = softmax(logits, mask = mask)\n target_rank = len(target.get_shape().as_list())\n out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)\n return out\n\ndef highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):\n with tf.variable_scope(scope or \"highway_layer\"):\n d = arg.get_shape()[-1]\n trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)\n trans = tf.nn.relu(trans)\n gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)\n gate = tf.nn.sigmoid(gate)\n out = gate * trans + (1 - gate) * arg\n return out\n\n\ndef highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):\n with tf.variable_scope(scope or \"highway_network\"):\n prev = arg\n cur = None\n for layer_idx in range(num_layers):\n cur = highway_layer(prev, bias, bias_start=bias_start, scope=\"layer_{}\".format(layer_idx), wd=wd,\n input_keep_prob=input_keep_prob)\n prev = cur\n return cur\n\ndef conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):\n with tf.variable_scope(scope or \"conv1d\"):\n num_channels = in_.get_shape()[-1]\n filter_ = tf.get_variable(\"filter\", shape=[1, height, num_channels, filter_size], dtype='float')\n bias = tf.get_variable(\"bias\", shape=[filter_size], dtype='float')\n strides = [1, 1, 1, 1]\n in_ = tf.nn.dropout(in_, keep_prob)\n xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]\n out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]\n return out\n\n\ndef multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):\n with tf.variable_scope(scope or \"multi_conv1d\"):\n assert len(filter_sizes) == len(heights)\n outs = []\n for filter_size, height in zip(filter_sizes, heights):\n if filter_size == 0:\n continue\n 
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope=\"conv1d_{}\".format(height))\n outs.append(out)\n concat_out = tf.concat(outs, axis=2)\n return concat_out\n\n\nif __name__ == '__main__':\n a = tf.Variable(np.random.random(size=(2,2,4)))\n b = tf.Variable(np.random.random(size=(2,3,4)))\n c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])\n test = flatten(c,1)\n out = reconstruct(test, c, 1)\n d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])\n e = linear([c,d,c*d],1,bias = False,scope = \"test\",)\n # f = softsel(d, e)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n print(sess.run(test))\n print(sess.run(tf.shape(out)))\n exit()\n print(sess.run(tf.shape(a)))\n print(sess.run(a))\n print(sess.run(tf.shape(b)))\n print(sess.run(b))\n print(sess.run(tf.shape(c)))\n print(sess.run(c)) \n print(sess.run(tf.shape(d)))\n print(sess.run(d))\n print(sess.run(tf.shape(e)))\n print(sess.run(e))\n" }, { "alpha_fraction": 0.6890080571174622, "alphanum_fraction": 0.6916890144348145, "avg_line_length": 25.64285659790039, "blob_id": "8c86768e5cc3de0f456a94685dd859b87e3407f2", "content_id": "9247d48d1c7da0097f56321d9547ef1e8a477f03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/models/__init__.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "from .QA_CNN_pairwise import QA_CNN_extend as CNN\nfrom .QA_RNN_pairwise import QA_RNN_extend as RNN\nfrom .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN\ndef setup(opt):\n\tif opt[\"model_name\"]==\"cnn\":\n\t\tmodel=CNN(opt)\n\telif opt[\"model_name\"]==\"rnn\":\n\t\tmodel=RNN(opt)\n\telif opt['model_name']=='qcnn':\n\t\tmodel=QCNN(opt)\n\telse:\n\t\tprint(\"no model\")\n\t\texit(0)\n\treturn model\n" }, { "alpha_fraction": 0.6432403326034546, "alphanum_fraction": 0.6510193347930908, "avg_line_length": 31.622806549072266, "blob_id": "28b715cf6bb51c99b929ed45da9f69c1ba1bc1a7", "content_id": "fff94ecfc1430b23bc6647820fec02c2fe375414", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3744, "license_type": "no_license", "max_line_length": 92, "num_lines": 114, "path": "/test.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom tensorflow import flags\nimport tensorflow as tf\nfrom config import Singleton\nimport data_helper\n\nimport datetime\nimport os\nimport models\nimport numpy as np\nimport evaluation\n\nfrom data_helper import log_time_delta,getLogger\n\nlogger=getLogger()\n \n\n\nargs = Singleton().get_rnn_flag()\n#args = Singleton().get_8008_flag()\n\nargs._parse_flags()\nopts=dict()\nlogger.info(\"\\nParameters:\")\nfor attr, value in sorted(args.__flags.items()):\n logger.info((\"{}={}\".format(attr.upper(), value)))\n opts[attr]=value\n\n\ntrain,test,dev = data_helper.load(args.data,filter = args.clean)\n\nq_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))\na_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))\n\nalphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )\nlogger.info('the number of words :%d '%len(alphabet))\n\nif args.data==\"quora\" or args.data==\"8008\" :\n print(\"cn embedding\")\n embedding = data_helper.get_embedding(alphabet,dim=200,language=\"cn\",dataset=args.data )\n train_data_loader = data_helper.getBatch48008\nelse:\n embedding = 
data_helper.get_embedding(alphabet,dim=300,dataset=args.data )\n train_data_loader = data_helper.get_mini_batch\nopts[\"embeddings\"] =embedding\nopts[\"vocab_size\"]=len(alphabet)\nopts[\"max_input_right\"]=a_max_sent_length\nopts[\"max_input_left\"]=q_max_sent_length\nopts[\"filter_sizes\"]=list(map(int, args.filter_sizes.split(\",\")))\n\nprint(\"innitilize over\")\n\n\n \n \n#with tf.Graph().as_default(), tf.device(\"/gpu:\" + str(args.gpu)):\nwith tf.Graph().as_default(): \n # with tf.device(\"/cpu:0\"):\n session_conf = tf.ConfigProto()\n session_conf.allow_soft_placement = args.allow_soft_placement\n session_conf.log_device_placement = args.log_device_placement\n session_conf.gpu_options.allow_growth = True\n sess = tf.Session(config=session_conf)\n model=models.setup(opts)\n model.build_graph() \n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer()) # fun first than print or save\n \n \n ckpt = tf.train.get_checkpoint_state(\"checkpoint\") \n if ckpt and ckpt.model_checkpoint_path: \n # Restores from checkpoint \n saver.restore(sess, ckpt.model_checkpoint_path)\n print(sess.run(model.position_embedding)[0])\n if os.path.exists(\"model\") : \n import shutil\n shutil.rmtree(\"model\")\n builder = tf.saved_model.builder.SavedModelBuilder(\"./model\")\n builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])\n builder.save(True)\n variable_averages = tf.train.ExponentialMovingAverage( model) \n variables_to_restore = variable_averages.variables_to_restore() \n saver = tf.train.Saver(variables_to_restore) \n for name in variables_to_restore: \n print(name) \n \n @log_time_delta\n def predict(model,sess,batch,test):\n scores = []\n for data in batch: \n score = model.predict(sess,data)\n scores.extend(score) \n return np.array(scores[:len(test)])\n \n \n text = \"怎么 提取 公积金 ?\"\n \n splited_text=data_helper.encode_to_split(text,alphabet)\n\n mb_q,mb_q_mask = data_helper.prepare_data([splited_text])\n mb_a,mb_a_mask = data_helper.prepare_data([splited_text])\n \n data = (mb_q,mb_a,mb_q_mask,mb_a_mask)\n score = model.predict(sess,data)\n print(score)\n feed_dict = {\n model.question:data[0],\n model.answer:data[1],\n model.q_mask:data[2],\n model.a_mask:data[3],\n model.dropout_keep_prob_holder:1.0\n } \n sess.run(model.position_embedding,feed_dict=feed_dict)[0]\n\n \n " }, { "alpha_fraction": 0.574804425239563, "alphanum_fraction": 0.5823886394500732, "avg_line_length": 33.48760223388672, "blob_id": "f93760d01f42bdbab9b03cfd3bf136ead4ada42d", "content_id": "f8d8ba70450dfeb327a992e976f47c106b636729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12526, "license_type": "no_license", "max_line_length": 167, "num_lines": 363, "path": "/data_helper.py", "repo_name": "shuishen112/pairwise-rnn", "src_encoding": "UTF-8", "text": "#-*- coding:utf-8 -*-\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport string\nfrom collections import Counter\nimport pandas as pd\n\nfrom tqdm import tqdm\nimport random\nfrom functools import wraps\nimport time\nimport pickle\ndef log_time_delta(func):\n @wraps(func)\n def _deco(*args, **kwargs):\n start = time.time()\n ret = func(*args, **kwargs)\n end = time.time()\n delta = end - start\n print( \"%s runed %.2f seconds\"% (func.__name__,delta))\n return ret\n return _deco\n\nimport tqdm\nfrom nltk.corpus import stopwords\n\n\nOVERLAP = 237\nclass Alphabet(dict):\n def __init__(self, start_feature_id = 1):\n self.fid = start_feature_id\n\n def 
add(self, item):\n idx = self.get(item, None)\n if idx is None:\n idx = self.fid\n self[item] = idx\n # self[idx] = item\n self.fid += 1\n return idx\n\n def dump(self, fname):\n with open(fname, \"w\") as out:\n for k in sorted(self.keys()):\n out.write(\"{}\\t{}\\n\".format(k, self[k]))\ndef cut(sentence):\n \n tokens = sentence.lower().split()\n # tokens = [w for w in tokens if w not in stopwords.words('english')]\n return tokens\n@log_time_delta\ndef load(dataset, filter = False):\n data_dir = \"data/\" + dataset\n datas = []\n for data_name in ['train.txt','test.txt','dev.txt']:\n data_file = os.path.join(data_dir,data_name)\n data = pd.read_csv(data_file,header = None,sep=\"\\t\",names=[\"question\",\"answer\",\"flag\"]).fillna('0')\n# data = pd.read_csv(data_file,header = None,sep=\"\\t\",names=[\"question\",\"answer\",\"flag\"],quoting =3).fillna('0')\n if filter == True:\n datas.append(removeUnanswerdQuestion(data))\n else:\n datas.append(data)\n # sub_file = os.path.join(data_dir,'submit.txt')\n # submit = pd.read_csv(sub_file,header = None,sep = \"\\t\",names = ['question','answer'],quoting = 3)\n # datas.append(submit)\n return tuple(datas)\n@log_time_delta\ndef removeUnanswerdQuestion(df):\n counter= df.groupby(\"question\").apply(lambda group: sum(group[\"flag\"]))\n questions_have_correct=counter[counter>0].index\n counter= df.groupby(\"question\").apply(lambda group: sum(group[\"flag\"]==0))\n questions_have_uncorrect=counter[counter>0].index\n counter=df.groupby(\"question\").apply(lambda group: len(group[\"flag\"]))\n questions_multi=counter[counter>1].index\n\n return df[df[\"question\"].isin(questions_have_correct) & df[\"question\"].isin(questions_have_correct) & df[\"question\"].isin(questions_have_uncorrect)].reset_index()\n@log_time_delta\ndef get_alphabet(corpuses=None,dataset=\"\"):\n pkl_name=\"temp/\"+dataset+\".alphabet.pkl\"\n if os.path.exists(pkl_name):\n return pickle.load(open(pkl_name,\"rb\"))\n alphabet = Alphabet(start_feature_id = 0)\n alphabet.add('[UNK]') \n alphabet.add('END') \n count = 0\n for corpus in corpuses:\n for texts in [corpus[\"question\"].unique(),corpus[\"answer\"]]:\n\n for sentence in texts: \n tokens = cut(sentence)\n for token in set(tokens):\n alphabet.add(token)\n print(\"alphabet size %d\" % len(alphabet.keys()) )\n if not os.path.exists(\"temp\"):\n os.mkdir(\"temp\")\n pickle.dump( alphabet,open(pkl_name,\"wb\"))\n return alphabet\n@log_time_delta\ndef getSubVectorsFromDict(vectors,vocab,dim = 300):\n embedding = np.zeros((len(vocab),dim))\n count = 1\n for word in vocab:\n if word in vectors:\n count += 1\n embedding[vocab[word]]= vectors[word]\n else:\n embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()\n print( 'word in embedding',count)\n return embedding\ndef encode_to_split(sentence,alphabet):\n indices = [] \n tokens = cut(sentence)\n seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]\n return seq\n@log_time_delta\ndef load_text_vec(alphabet,filename=\"\",embedding_size = 100):\n vectors = {}\n with open(filename,encoding='utf-8') as f:\n i = 0\n for line in f:\n i += 1\n if i % 100000 == 0:\n print( 'epch %d' % i)\n items = line.strip().split(' ')\n if len(items) == 2:\n vocab_size, embedding_size= items[0],items[1]\n print( ( vocab_size, embedding_size))\n else:\n word = items[0]\n if word in alphabet:\n vectors[word] = items[1:]\n print( 'embedding_size',embedding_size)\n print( 'done')\n print( 'words found in wor2vec embedding ',len(vectors.keys()))\n 
return vectors\n@log_time_delta\ndef get_embedding(alphabet,dim = 300,language =\"en\",dataset=\"\"):\n pkl_name=\"temp/\"+dataset+\".subembedding.pkl\"\n if os.path.exists(pkl_name):\n return pickle.load(open(pkl_name,\"rb\"))\n if language==\"en\":\n fname = 'embedding/glove.6B/glove.6B.300d.txt'\n else:\n fname= \"embedding/embedding.200.header_txt\"\n embeddings = load_text_vec(alphabet,fname,embedding_size = dim)\n sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)\n pickle.dump( sub_embeddings,open(pkl_name,\"wb\"))\n return sub_embeddings\n\n@log_time_delta\ndef get_mini_batch_test(df,alphabet,batch_size):\n q = []\n a = []\n pos_overlap = []\n for index,row in df.iterrows():\n question = encode_to_split(row[\"question\"],alphabet)\n answer = encode_to_split(row[\"answer\"],alphabet)\n overlap_pos = overlap_index(row['question'],row['answer'])\n q.append(question)\n a.append(answer)\n pos_overlap.append(overlap_pos)\n\n m = 0\n n = len(q)\n idx_list = np.arange(m,n,batch_size)\n mini_batches = []\n for idx in idx_list:\n mini_batches.append(np.arange(idx,min(idx + batch_size,n)))\n for mini_batch in mini_batches:\n mb_q = [ q[t] for t in mini_batch]\n mb_a = [ a[t] for t in mini_batch]\n mb_pos_overlap = [pos_overlap[t] for t in mini_batch]\n mb_q,mb_q_mask = prepare_data(mb_q)\n mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)\n\n\n yield(mb_q,mb_a)\n\n# calculate the overlap_index\ndef overlap_index(question,answer,stopwords = []):\n ans_token = cut(answer)\n qset = set(cut(question))\n aset = set(ans_token)\n a_len = len(ans_token)\n\n # q_index = np.arange(1,q_len)\n a_index = np.arange(1,a_len + 1)\n\n overlap = qset.intersection(aset)\n # for i,q in enumerate(cut(question)[:q_len]):\n # value = 1\n # if q in overlap:\n # value = 2\n # q_index[i] = value\n for i,a in enumerate(ans_token):\n if a in overlap:\n a_index[i] = OVERLAP\n return a_index\n\n\n\ndef getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):\n q,a,neg_a=[],[],[]\n answers=df[\"answer\"][:250]\n ground_truth=df.groupby(\"question\").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict() \n \n for question in tqdm(df['question'].unique()):\n \n index= ground_truth[question] \n \n canindates = [i for i in range(250)]\n canindates.remove(index)\n a_neg_index = random.choice(canindates)\n\n seq_q = encode_to_split(question,alphabet)\n seq_a = encode_to_split(answers[index],alphabet)\n seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)\n \n q.append(seq_q) \n a.append( seq_a)\n neg_a.append(seq_neg_a )\n \n return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) \ndef iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):\n\n\n if sort_by_len:\n sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)\n q = [ q[i] for i in sorted_index]\n a = [a[i] for i in sorted_index]\n neg_a = [ neg_a[i] for i in sorted_index]\n\n pos_overlap = [pos_overlap[i] for i in sorted_index]\n neg_overlap = [neg_overlap[i] for i in sorted_index]\n\n #get batch\n m = 0\n n = len(q)\n\n idx_list = np.arange(m,n,batch_size)\n if shuffle:\n np.random.shuffle(idx_list)\n\n mini_batches = []\n for idx in idx_list:\n mini_batches.append(np.arange(idx,min(idx + batch_size,n)))\n\n for mini_batch in tqdm(mini_batches):\n mb_q = [ q[t] for t in mini_batch]\n mb_a = [ a[t] for t in mini_batch]\n mb_neg_a = [ neg_a[t] for t in mini_batch]\n mb_pos_overlap = [pos_overlap[t] for t in mini_batch]\n mb_neg_overlap = [neg_overlap[t] for t in 
mini_batch]\n mb_q,mb_q_mask = prepare_data(mb_q)\n mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)\n mb_neg_a,mb_neg_overlaps = prepare_data(mb_neg_a,mb_neg_overlap)\n # mb_a,mb_a_mask = prepare_data(mb_a,mb_pos_overlap)\n\n # mb_neg_a , mb_a_neg_mask = prepare_data(mb_neg_a)\n\n\n yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)\n\n\ndef get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):\n q = []\n a = []\n neg_a = []\n for question in df['question'].unique():\n# group = df[df[\"question\"]==question]\n# pos_answers = group[df[\"flag\"] == 1][\"answer\"]\n# neg_answers = group[df[\"flag\"] == 0][\"answer\"].reset_index()\n group = df[df[\"question\"]==question]\n pos_answers = group[group[\"flag\"] == 1][\"answer\"]\n neg_answers = group[group[\"flag\"] == 0][\"answer\"]#.reset_index()\n\n for pos in pos_answers:\n \n if model is not None and sess is not None:\n \n pos_sent= encode_to_split(pos,alphabet)\n q_sent,q_mask= prepare_data([pos_sent])\n \n neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers] \n\n a_sent,a_mask= prepare_data(neg_sents) \n \n scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))\n neg_index = scores.argmax()\n \n\n \n else:\n\n if len(neg_answers.index) > 0:\n neg_index = np.random.choice(neg_answers.index)\n neg = neg_answers.reset_index().loc[neg_index,][\"answer\"]\n seq_q = encode_to_split(question,alphabet)\n seq_a = encode_to_split(pos,alphabet)\n seq_neg_a = encode_to_split(neg,alphabet)\n q.append(seq_q)\n a.append(seq_a)\n neg_a.append(seq_neg_a)\n return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)\n \n\ndef prepare_data(seqs,overlap = None):\n\n lengths = [len(seq) for seq in seqs]\n n_samples = len(seqs)\n max_len = np.max(lengths)\n\n x = np.zeros((n_samples,max_len)).astype('int32')\n if overlap is not None:\n overlap_position = np.zeros((n_samples,max_len)).astype('float')\n\n for idx ,seq in enumerate(seqs):\n x[idx,:lengths[idx]] = seq\n overlap_position[idx,:lengths[idx]] = overlap[idx]\n return x,overlap_position\n else:\n x_mask = np.zeros((n_samples, max_len)).astype('float')\n for idx, seq in enumerate(seqs):\n x[idx, :lengths[idx]] = seq\n x_mask[idx, :lengths[idx]] = 1.0\n # print( x, x_mask)\n return x, x_mask\n\n# def prepare_data(seqs):\n# lengths = [len(seq) for seq in seqs]\n# n_samples = len(seqs)\n# max_len = np.max(lengths)\n\n# x = np.zeros((n_samples, max_len)).astype('int32')\n# x_mask = np.zeros((n_samples, max_len)).astype('float')\n# for idx, seq in enumerate(seqs):\n# x[idx, :lengths[idx]] = seq\n# x_mask[idx, :lengths[idx]] = 1.0\n# # print( x, x_mask)\n# return x, x_mask\n \n\ndef getLogger():\n import sys\n import logging\n import os\n import time\n now = int(time.time()) \n timeArray = time.localtime(now)\n timeStamp = time.strftime(\"%Y%m%d%H%M%S\", timeArray)\n log_filename = \"log/\" +time.strftime(\"%Y%m%d\", timeArray)\n \n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program) \n if not os.path.exists(log_filename):\n os.mkdir(log_filename)\n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')\n logging.root.setLevel(level=logging.INFO)\n logger.info(\"running %s\" % ' '.join(sys.argv))\n \n return logger\n\n\n\n\n\n\n\n" } ]
8
pablor0mero/Placester_Test_Pablo_Romero
https://github.com/pablor0mero/Placester_Test_Pablo_Romero
a49b3f184e5668aeda581c69b6ba9ee6d51c273f
4ec944a5c65a34e7a722928624dbd8095f385350
e39447127964073716c0b31559d3b32ee0166c8e
refs/heads/master
"2021-01-16T20:04:35.004000"
"2017-08-13T18:51:54"
"2017-08-13T18:51:54"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6668689250946045, "alphanum_fraction": 0.6708130836486816, "avg_line_length": 44.9295768737793, "blob_id": "df9bdd4626eba5ff9b33e2259d936a147f3a36a9", "content_id": "7529ffc9955755db70909e3dba74a02e9b95bd26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3296, "license_type": "no_license", "max_line_length": 157, "num_lines": 71, "path": "/main.py", "repo_name": "pablor0mero/Placester_Test_Pablo_Romero", "src_encoding": "UTF-8", "text": "# For this solution I'm using TextBlob, using it's integration with WordNet.\n\nfrom textblob import TextBlob\nfrom textblob import Word\nfrom textblob.wordnet import VERB\nimport nltk\nimport os\nimport sys\nimport re\nimport json\n\nresults = { \"results\" : [] }\n\n#Override NLTK data path to use the one I uploaded in the folder\ndir_path = os.path.dirname(os.path.realpath(__file__))\nnltk_path = dir_path + os.path.sep + \"nltk_data\"\nnltk.data.path= [nltk_path]\n\n#Text to analyze\nTEXT = \"\"\"\n Take this paragraph of text and return an alphabetized list of ALL unique words. A unique word is any form of a word often communicated\n with essentially the same meaning. For example,\n fish and fishes could be defined as a unique word by using their stem fish. For each unique word found in this entire paragraph,\n determine the how many times the word appears in total.\n Also, provide an analysis of what sentence index position or positions the word is found.\n The following words should not be included in your analysis or result set: \"a\", \"the\", \"and\", \"of\", \"in\", \"be\", \"also\" and \"as\".\n Your final result MUST be displayed in a readable console output in the same format as the JSON sample object shown below. \n \"\"\"\nTEXT = TEXT.lower()\n\nWORDS_NOT_TO_CONSIDER = [\"a\", \"the\", \"and\", \"of\", \"in\", \"be\", \"also\", \"as\"]\nnlpText= TextBlob(TEXT)\n\ndef getSentenceIndexesForWord(word, sentences):\n sentenceIndexes = []\n for index, sentence in enumerate(sentences):\n count = sum(1 for _ in re.finditer(r'\\b%s\\b' % re.escape(word.lower()), sentence))\n if count > 0:\n sentenceIndexes.append(index)\n return sentenceIndexes\n\n#1: Get all words, excluding repetitions and all the sentences in the text\nnlpTextWords = sorted(set(nlpText.words))\nnlpTextSentences = nlpText.raw_sentences\n\n#2 Get results\nsynonymsList = []\nallreadyReadWords = []\nfor word in nlpTextWords:\n if word not in WORDS_NOT_TO_CONSIDER and word not in allreadyReadWords:\n timesInText = nlpText.word_counts[word]\n \n #Get sentence indexes where the word can be found\n sentenceIndexes = getSentenceIndexesForWord(word, nlpTextSentences)\n\n #Check for synonyms\n for word2 in nlpTextWords:\n if word2 not in WORDS_NOT_TO_CONSIDER and ( word.lower() != word2.lower() and len(list(set(word.synsets) & set(word2.synsets))) > 0 ):\n #If I find a synonym of the word I add it to the list of words allready read and add the times that synonym appeared in the text to the total\n #count of the unique word and the corresponding sentence indexes\n allreadyReadWords.append(word2)\n timesInText = timesInText + nlpText.word_counts[word2]\n sentenceIndexes += getSentenceIndexesForWord(word2,nlpTextSentences)\n \n allreadyReadWords.append(word)\n \n results[\"results\"].append({\"word\" : word.lemmatize(), #I return the lemma of the word because TextBlob's stems seem to be wrong for certain words\n \"total-occurances\": timesInText,\n \"sentence-indexes\": 
sorted(set(sentenceIndexes))})\n\nprint(json.dumps(results, indent=4))\n \n \n \n" } ]
1
GabinCleaver/Auto_Discord_Bump
https://github.com/GabinCleaver/Auto_Discord_Bump
bfba81b0abc3134a84ed9a5656f98f762452ad56
3014435e39416664c95d1f72b2665359bd278f3f
2e1aacf4158173933cf7ac6e75348621b3d16e2c
refs/heads/main
"2023-05-25T19:52:33.283000"
"2021-06-06T21:21:51"
"2021-06-06T21:21:51"
374,466,085
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.6064257025718689, "alphanum_fraction": 0.7670682668685913, "avg_line_length": 30.125, "blob_id": "a4f7075323d04905caaa5f7a96179923d1ba6a1a", "content_id": "97f47b4fc5377d7a991164b4694e147f02bf7786", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "permissive", "max_line_length": 110, "num_lines": 8, "path": "/README.md", "repo_name": "GabinCleaver/Auto_Discord_Bump", "src_encoding": "UTF-8", "text": "# Auto Discord Bump\n❗ Un auto bump pour discord totalement fait en Python par moi, et en français.\n\n💖 Enjoy !\n\n🎫 Mon Discord: Gabin#7955\n\n![auto](https://user-images.githubusercontent.com/79531012/120940519-f565ca80-c71d-11eb-8df8-da8134308fe7.png)\n" }, { "alpha_fraction": 0.5115303993225098, "alphanum_fraction": 0.5618448853492737, "avg_line_length": 20.809524536132812, "blob_id": "50515b08e5829ad8acf4e4e1656a88d0a98cc4ee", "content_id": "f795556e8139b36fc6b731e2de1cc0843fa66856", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "permissive", "max_line_length": 109, "num_lines": 21, "path": "/autobump.py", "repo_name": "GabinCleaver/Auto_Discord_Bump", "src_encoding": "UTF-8", "text": "import requests\r\nimport time\r\n\r\ntoken = \"TOKEN\"\r\n\r\nheaders = {\r\n 'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',\r\n 'Authorization' : token\r\n}\r\n\r\nid = input(f\"[?] Salon ID: \")\r\nprint(\"\")\r\n\r\nwhile True:\r\n requests.post(\r\n f\"https://discord.com/api/channels/{id}/messages\",\r\n headers = headers,\r\n json = {\"content\" : \"!d bump\"}\r\n )\r\n print(\"[+] Serveur Bumpé\")\r\n time.sleep(121 * 60)" } ]
2
altopalido/yelp_python
https://github.com/altopalido/yelp_python
e8c02f7c570415e1a296b53ed1cef1dcda5c6d6a
b0af5b8209ca079bfc76b00236357306650677fb
100026e60fee0036b3a99719f2a601b1d48cb5e7
refs/heads/master
"2021-05-09T05:00:03.593000"
"2018-01-28T20:16:32"
"2018-01-28T20:16:32"
119,293,367
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8148148059844971, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 53, "blob_id": "7aa27cde32be0d0b17a6b1e5ae85fc2a3aa51a62", "content_id": "a5a3babda1d3b7a1c6d3e1ce70d4ccd247f1e418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 93, "num_lines": 2, "path": "/README.md", "repo_name": "altopalido/yelp_python", "src_encoding": "UTF-8", "text": "# yelp_python\nWeb Application development with python and SQLite. Using www.yelp.com user reviews Database.\n" }, { "alpha_fraction": 0.7091836929321289, "alphanum_fraction": 0.75, "avg_line_length": 31.66666603088379, "blob_id": "6f43ec49c34ed0bef5490c4bb5429812f9e39012", "content_id": "4248e210aa107d7482ab10c9cc1caafe83c3a189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 57, "num_lines": 6, "path": "/yelp_python/settings.py", "repo_name": "altopalido/yelp_python", "src_encoding": "UTF-8", "text": "# Madis Settings\nMADIS_PATH='/Users/alexiatopalidou/Desktop/erg/madis/src'\n\n# Webserver Settings\n# IMPORTANT: The port must be available.\nweb_port = 9090 # must be integer (this is wrong:'9090')\n" }, { "alpha_fraction": 0.6126810312271118, "alphanum_fraction": 0.6223949193954468, "avg_line_length": 26.485437393188477, "blob_id": "8a755cbadc70a38a71c48dd1857c6f7f8fa28467", "content_id": "7c6ee49e96b85079d396e129f47804646398218d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5662, "license_type": "no_license", "max_line_length": 361, "num_lines": 206, "path": "/yelp_python/app.py", "repo_name": "altopalido/yelp_python", "src_encoding": "UTF-8", "text": "# ----- CONFIGURE YOUR EDITOR TO USE 4 SPACES PER TAB ----- #\nimport settings\nimport sys\n\n\ndef connection():\n ''' User this function to create your connections '''\n import sys\n sys.path.append(settings.MADIS_PATH)\n import madis\n\n con = madis.functions.Connection('/Users/alexiatopalidou/Desktop/erg/yelp_python/yelp.db')\n \n return con\n\ndef classify_review(reviewid):\n \n#check for compatible data type \n try:\n val=str(reviewid)\n except ValueError:\n return [(\"Error! Insert correct data type.\")]\n \n # Create a new connection\n global con\n con=connection()\n \n # Create cursors on the connection\n #alternative: create the desired list after every textwindow, posterms, negterms query\n cur=con.cursor()\n cura=con.cursor()\n curb=con.cursor()\n cur1=con.cursor()\n cur2=con.cursor()\n \n #check for existance of given data inside the yelp.db\n curcheck=con.cursor()\n cur.execute(\"SELECT var('reviewid',?)\",(reviewid,))\n check=curcheck.execute(\"SELECT review_id from reviews where review_id=?\",(val,))\n try:\n ch=check.next()\n except StopIteration:\n return [(\"Error! 
Insert valid Review id.\",)]\n \n #sql query with textwindow - one for each occasion (terms with 1, 2 or 3 words)\n res=cur.execute(\"SELECT textwindow(text,0,0,1) from reviews where review_id=var('reviewid');\")\n resa=cura.execute(\"SELECT textwindow(text,0,0,2) from reviews where review_id=var('reviewid');\")\n resb=curb.execute(\"SELECT textwindow(text,0,0,3) from reviews where review_id=var('reviewid');\")\n \n #get positive/negative terms\n res1=cur1.execute(\"SELECT * from posterms;\")\n res2=cur2.execute(\"SELECT * from negterms;\")\n\n #create lists that store a)all reviews terms, b)positive terms and c)negative terms\n k=[]\n for n in res:\n k.append(n)\n\n for n in resa:\n k.append(n)\n\n for n in resb:\n k.append(n)\n\n m=[]\n for z in res1:\n m.append(z)\n\n o=[]\n for p in res2:\n o.append(p)\n\n #check if the review is positive or negative\n x=0\n for i in k:\n for j in m:\n if i==j:\n x=x+1\n\n y=0\n for i in k:\n for j in o:\n if i==j:\n y=y+1 \n \n if x>y:\n rsl='positive'\n elif x<y:\n rsl='negative'\n else:\n rsl='neutral'\n \n #return a list with the results\n res=cur.execute(\"SELECT b.name, ? from business b, reviews r where r.business_id=b.business_id and r.review_id=?\",(rsl, val,))\n \n l=[(\"business_name\",\"result\")]\n for i in res:\n l.append(i)\n\n return l\n\n\n\n\n\ndef classify_review_plain_sql(reviewid):\n\n # Create a new connection\n con=connection()\n \n # Create a cursor on the connection\n cur=con.cursor()\n \n \n return [(\"business_name\",\"result\")]\n\ndef updatezipcode(business_id,zipcode):\n\n #check for compatible data type \n try:\n val=str(business_id)\n val2=int(zipcode)\n except ValueError:\n return [(\"Error! Insert correct data type.\",)]\n \n # Create a new connection\n global con\n con=connection()\n\n # Create a cursor on the connection\n cur=con.cursor()\n\n #check for existance of given data inside the yelp.db or allowance of data value\n curcheck=con.cursor()\n cur.execute(\"select var('business_id',?)\", (val,))\n check=curcheck.execute(\"SELECT business_id from business where business_id=?;\",(val,))\n try:\n ch=check.next()\n except StopIteration:\n return [(\"Error! Insert valid Business Id.\",)]\n if val2>99999999999999999999: #we do not actually need that\n return [(\"Error! Insert valid Zip code.\",)]\n \n #execute main sql query\n res=cur.execute(\"UPDATE business set zip_code=? where business_id=?;\",(val2,val,))\n\n #return ok or comment that return and de-comment the bottom return for the business_id and the new zip_code\n return [('ok',)]\n\n #res=cur.execute(\"SELECT business_id, zip_code from business where business_id=?;\",(val,)) \n #l=[(\"business_id\", \"zip_code\"),]\n\n #for i in res:\n # l.append(i)\n \n #return l\n \n\t\ndef selectTopNbusinesses(category_id,n):\n\n #check for compatible data type \n try:\n val=int(category_id)\n val2=int(n)\n except ValueError:\n return [(\"Error! Insert correct data type\",)]\n \n # Create a new connection\n global con\n con=connection()\n \n # Create a cursor on the connection\n cur=con.cursor()\n \n #check for existance of given data inside the yelp.db\n curcheck=con.cursor()\n cur.execute(\"SELECT var('category_id',?)\", (val,))\n check=curcheck.execute(\"SELECT category_id from category where category_id=?;\",(val,))\n try:\n ch=check.next()\n except StopIteration:\n return [(\"Error! Insert valid Category Id.\",)]\n if val2<0:\n return [(\"Error! 
Choose >=0 businesses to return.\",)]\n \n #execute main sql query\n res=cur.execute(\"SELECT b.business_id, count(rpn.positive) from reviews_pos_neg rpn, reviews r, business b, business_category bc, category c where rpn.review_id=r.review_id and r.business_id=b.business_id and b.business_id=bc.business_id and bc.category_id=c.category_id and c.category_id=? group by b.business_id order by count(rpn.positive) desc;\",(val,))\n\n #return a list with the results\n l=[(\"business_id\", \"number_of_reviews\",)]\n for i in res:\n l.append(i)\n\n return l[0:val2+1]\n\n\n\ndef traceUserInfuence(userId,depth):\n # Create a new connection\n con=connection()\n # Create a cursor on the connection\n cur=con.cursor()\n \n\n\n return [(\"user_id\",),]\n" } ]
3
smellycats/SX-CarRecgServer
https://github.com/smellycats/SX-CarRecgServer
79640c297195acc74e84c2b08df45ce06c9ba060
3bf19e76f39f60c9a5ba39269dadbad265516056
aa9d4afb1eb52fcdabc99d8b9db2fb240b9dd161
refs/heads/master
"2021-01-20T05:31:17.651000"
"2015-10-11T15:36:44"
"2015-10-11T15:36:44"
38,525,081
0
1
null
"2015-07-04T06:58:28"
"2015-07-04T07:00:14"
"2015-10-11T15:36:44"
Python
[ { "alpha_fraction": 0.5990098714828491, "alphanum_fraction": 0.6138613820075989, "avg_line_length": 27.85714340209961, "blob_id": "ab72a4b7f66d3a1679f6e5cb1e87814a03205b9d", "content_id": "2a37c691c9984d6e58c46639e1dd9f01f0a96461", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 64, "num_lines": 14, "path": "/run.py", "repo_name": "smellycats/SX-CarRecgServer", "src_encoding": "UTF-8", "text": "from car_recg import app\nfrom car_recg.recg_ser import RecgServer\nfrom ini_conf import MyIni\n\nif __name__ == '__main__':\n rs = RecgServer()\n rs.main()\n my_ini = MyIni()\n sys_ini = my_ini.get_sys_conf()\n app.config['THREADS'] = sys_ini['threads']\n app.config['MAXSIZE'] = sys_ini['threads'] * 16\n app.run(host='0.0.0.0', port=sys_ini['port'], threaded=True)\n del rs\n del my_ini\n" }, { "alpha_fraction": 0.5698602795600891, "alphanum_fraction": 0.5938123464584351, "avg_line_length": 17.90566062927246, "blob_id": "f9194bb12d5cc33adf2af50a9c98983377bfea7e", "content_id": "599732078eb06b7b2a9daf5d9a2810b72e5e7d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "no_license", "max_line_length": 69, "num_lines": 53, "path": "/car_recg/config.py", "repo_name": "smellycats/SX-CarRecgServer", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport Queue\n\n\nclass Config(object):\n # 密码 string\n SECRET_KEY = 'hellokitty'\n # 服务器名称 string\n HEADER_SERVER = 'SX-CarRecgServer'\n # 加密次数 int\n ROUNDS = 123456\n # token生存周期,默认1小时 int\n EXPIRES = 7200\n # 数据库连接 string\n SQLALCHEMY_DATABASE_URI = 'mysql://root:[email protected]/hbc_store'\n # 数据库连接绑定 dict\n SQLALCHEMY_BINDS = {}\n # 用户权限范围 dict\n SCOPE_USER = {}\n # 白名单启用 bool\n WHITE_LIST_OPEN = True\n # 白名单列表 set\n WHITE_LIST = set()\n # 处理线程数 int\n THREADS = 4\n # 允许最大数队列为线程数16倍 int\n MAXSIZE = THREADS * 16\n # 图片下载文件夹 string\n IMG_PATH = 'img'\n # 图片截取文件夹 string\n CROP_PATH = 'crop'\n # 超时 int\n TIMEOUT = 5\n # 识别优先队列 object\n RECGQUE = Queue.PriorityQueue()\n # 退出标记 bool\n IS_QUIT = False\n # 用户字典 dict\n USER = {}\n # 上传文件保存路径 string\n UPLOAD_PATH = 'upload'\n\n\nclass Develop(Config):\n DEBUG = True\n\n\nclass Production(Config):\n DEBUG = False\n\n\nclass Testing(Config):\n TESTING = True\n" }, { "alpha_fraction": 0.5402216911315918, "alphanum_fraction": 0.5586342215538025, "avg_line_length": 30.784090042114258, "blob_id": "a86f1255ebca9673b94ccbc5613acd9ad3e0a42e", "content_id": "ac75ddfe82f4da2667af81fe12be94049ea340aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5750, "license_type": "no_license", "max_line_length": 134, "num_lines": 176, "path": "/car_recg/views.py", "repo_name": "smellycats/SX-CarRecgServer", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport Queue\nimport random\nfrom functools import wraps\n\nimport arrow\nfrom flask import g, request\nfrom flask_restful import reqparse, Resource\nfrom passlib.hash import sha256_crypt\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n\nfrom car_recg import app, db, api, auth, limiter, logger, access_logger\nfrom models import Users, Scope\nimport helper\n\n\ndef verify_addr(f):\n \"\"\"IP地址白名单\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not app.config['WHITE_LIST_OPEN'] or request.remote_addr == '127.0.0.1' or request.remote_addr in app.config['WHITE_LIST']:\n 
pass\n else:\n return {'status': '403.6',\n 'message': u'禁止访问:客户端的 IP 地址被拒绝'}, 403\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_password\ndef verify_password(username, password):\n if username.lower() == 'admin':\n user = Users.query.filter_by(username='admin').first()\n else:\n return False\n if user:\n return sha256_crypt.verify(password, user.password)\n return False\n\n\ndef verify_token(f):\n \"\"\"token验证装饰器\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.headers.get('Access-Token'):\n return {'status': '401.6', 'message': 'missing token header'}, 401\n token_result = verify_auth_token(request.headers['Access-Token'],\n app.config['SECRET_KEY'])\n if not token_result:\n return {'status': '401.7', 'message': 'invalid token'}, 401\n elif token_result == 'expired':\n return {'status': '401.8', 'message': 'token expired'}, 401\n g.uid = token_result['uid']\n g.scope = set(token_result['scope'])\n\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef verify_scope(scope):\n def scope(f):\n \"\"\"权限范围验证装饰器\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'all' in g.scope or scope in g.scope:\n return f(*args, **kwargs)\n else:\n return {}, 405\n return decorated_function\n return scope\n\n\nclass Index(Resource):\n\n def get(self):\n return {\n 'user_url': '%suser{/user_id}' % (request.url_root),\n 'scope_url': '%suser/scope' % (request.url_root),\n 'token_url': '%stoken' % (request.url_root),\n 'recg_url': '%sv1/recg' % (request.url_root),\n 'uploadrecg_url': '%sv1/uploadrecg' % (request.url_root),\n 'state_url': '%sv1/state' % (request.url_root)\n }, 200, {'Cache-Control': 'public, max-age=60, s-maxage=60'}\n\n\nclass RecgListApiV1(Resource):\n\n def post(self):\n parser = reqparse.RequestParser()\n\n parser.add_argument('imgurl', type=unicode, required=True,\n help='A jpg url is require', location='json')\n parser.add_argument('coord', type=list, required=True,\n help='A coordinates array is require',\n location='json')\n args = parser.parse_args()\n\n # 回调用的消息队列\n que = Queue.Queue()\n\n if app.config['RECGQUE'].qsize() > app.config['MAXSIZE']:\n return {'message': 'Server Is Busy'}, 449\n\n imgname = '%32x' % random.getrandbits(128)\n imgpath = os.path.join(app.config['IMG_PATH'], '%s.jpg' % imgname)\n try:\n helper.get_url_img(request.json['imgurl'], imgpath)\n except Exception as e:\n logger.error('Error url: %s' % request.json['imgurl'])\n return {'message': 'URL Error'}, 400\n\n app.config['RECGQUE'].put((10, request.json, que, imgpath))\n\n try:\n recginfo = que.get(timeout=15)\n\n os.remove(imgpath)\n except Queue.Empty:\n return {'message': 'Timeout'}, 408\n except Exception as e:\n logger.error(e)\n else:\n return {\n 'imgurl': request.json['imgurl'],\n 'coord': request.json['coord'],\n 'recginfo': recginfo\n }, 201\n\n\nclass StateListApiV1(Resource):\n\n def get(self):\n return {\n 'threads': app.config['THREADS'],\n 'qsize': app.config['RECGQUE'].qsize()\n }\n\n\nclass UploadRecgListApiV1(Resource):\n\n def post(self):\n # 文件夹路径 string\n filepath = os.path.join(app.config['UPLOAD_PATH'],\n arrow.now().format('YYYYMMDD'))\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n try:\n # 上传文件命名 随机32位16进制字符 string\n imgname = '%32x' % random.getrandbits(128)\n # 文件绝对路径 string\n imgpath = os.path.join(filepath, '%s.jpg' % imgname)\n f = request.files['file']\n f.save(imgpath)\n except Exception as e:\n logger.error(e)\n return {'message': 'File error'}, 400\n\n # 回调用的消息队列 object\n que 
= Queue.Queue()\n # 识别参数字典 dict\n r = {'coord': []}\n app.config['RECGQUE'].put((9, r, que, imgpath))\n try:\n recginfo = que.get(timeout=app.config['TIMEOUT'])\n except Queue.Empty:\n return {'message': 'Timeout'}, 408\n except Exception as e:\n logger.error(e)\n else:\n return {'coord': r['coord'], 'recginfo': recginfo}, 201\n\napi.add_resource(Index, '/')\napi.add_resource(RecgListApiV1, '/v1/recg')\napi.add_resource(StateListApiV1, '/v1/state')\napi.add_resource(UploadRecgListApiV1, '/v1/uploadrecg')\n" } ]
3
josemiche11/reversebycondition
https://github.com/josemiche11/reversebycondition
5f60ded07b0fdbc53064faa6313b2c79f57e97c1
39cc0c8299f004cb8a78db1c8858dd0b0306eeed
84f732ebfe9c1a63ef830dcb77c0fcf692b503c6
refs/heads/master
"2022-11-21T09:11:10.189000"
"2020-07-16T14:51:05"
"2020-07-16T14:51:05"
280,180,898
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47233203053474426, "alphanum_fraction": 0.5395256876945496, "avg_line_length": 17.461538314819336, "blob_id": "a12e86efe2fbb03a43f1647593888a601a6d17d6", "content_id": "d5bc11625583581c7acf424af05185e267ecf40f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 45, "num_lines": 26, "path": "/reversebycondition.py", "repo_name": "josemiche11/reversebycondition", "src_encoding": "UTF-8", "text": "'''\r\nInput- zoho123\r\nOutput- ohoz123\r\n\r\n'''\r\nchar= input(\"Enter the string: \")\r\nchar2= list(char)\r\nnum= \"1234567890\"\r\nlist1= [0]*len(char)\r\nlist2=[]\r\nfor i in range(len(char)):\r\n if char2[i] not in num:\r\n list2.append( char2.index( char2[i]))\r\n char2[i]= \"*\"\r\nlist2.reverse()\r\nk=0\r\nfor j in range( len(char) ):\r\n if j in list2:\r\n list1[j]= char[list2[k]]\r\n k= k+1\r\n else:\r\n list1[j]= char[j]\r\nch=\"\"\r\nfor l in range(len(list1)):\r\n ch= ch+ list1[l]\r\nprint(ch)\r\n" } ]
1
Lasyin/batch-resize
https://github.com/Lasyin/batch-resize
55e42ae9a1453b7e2234473da061ed5e4d42fe55
c25835295cea8d5f8d7cb60d743d68945e3fbd44
f2be945f8f1c8e440af11857998d006393804adf
refs/heads/master
"2020-03-10T19:47:31.606000"
"2018-04-14T22:32:27"
"2018-04-14T22:32:27"
129,555,807
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5518633723258972, "alphanum_fraction": 0.5549689531326294, "avg_line_length": 43.109588623046875, "blob_id": "6796e440ac3fd350da6e3f8737097308f99c2a8a", "content_id": "f9363232b48149f9e90dc005d98ce5e71844fa25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3220, "license_type": "no_license", "max_line_length": 179, "num_lines": 73, "path": "/batch_resize.py", "repo_name": "Lasyin/batch-resize", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport argparse\nfrom PIL import Image # From Pillow (pip install Pillow)\n\ndef resize_photos(dir, new_x, new_y, scale):\n if(not os.path.exists(dir)):\n # if not in full path format (/usrers/user/....)\n # check if path is in local format (folder is in current working directory)\n if(not os.path.exists(os.path.join(os.getcwd(), dir))):\n print(dir + \" does not exist.\")\n exit()\n else:\n # path is not a full path, but folder exists in current working directory\n # convert path to full path\n dir = os.path.join(os.getcwd(), dir)\n \n i = 1 # image counter for print statements\n for f in os.listdir(dir):\n if(not f.startswith('.') and '.' in f):\n # accepted image types. add more types if you need to support them!\n accepted_types = [\"jpg\", \"png\", \"bmp\"]\n if(f[-3:].lower() in accepted_types):\n # checks last 3 letters of file name to check file type (png, jpg, bmp...)\n # TODO: need to handle filetypes of more than 3 letters (for example, jpeg)\n path = os.path.join(dir, f)\n img = Image.open(path)\n\n if(scale > 0):\n w, h = img.size\n newIm = img.resize((w*scale, h*scale))\n else:\n newIm = img.resize((new_x, new_y))\n\n newIm.save(path)\n print(\"Image #\" + str(i) + \" finsihed resizing: \" + path)\n i=i+1\n else:\n print(f + \" of type: \" + f[-3:].lower() + \" is not an accepted file type. Skipping.\")\n print(\"ALL DONE :) Resized: \" + str(i) + \" photos\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"-directory\", help=\"(String) Specify the folder path of images to resize\")\n parser.add_argument(\"-s\", \"-size\", help=\"(Integer) New pixel value of both width and height. To specify width and height seperately, use -x and -y.\")\n parser.add_argument(\"-x\", \"-width\", help=\"(Integer) New pixel value of width\")\n parser.add_argument(\"-y\", \"-height\", help=\"(Integer) New pixel value of height\")\n parser.add_argument(\"-t\", \"-scale\", help=\"(Integer) Scales pixel sizes.\")\n\n args = parser.parse_args()\n\n if(not args.d or ((not args.s) and (not args.x and not args.y) and (not args.t))):\n print(\"You have error(s)...\\n\")\n if(not args.d):\n print(\"+ DIRECTORY value missing Please provide a path to the folder of images using the argument '-d'\\n\")\n if((not args.s) and (not args.x or not args.y) and (not args.t)):\n print(\"+ SIZE value(s) missing! Please provide a new pixel size. 
Do this by specifying -s (width and height) OR -x (width) and -y (height) values OR -t (scale) value\")\n exit()\n\n x = 0\n y = 0\n scale = 0\n if(args.s):\n x = int(args.s)\n y = int(args.s)\n elif(args.x and args.y):\n x = int(args.x)\n y = int(args.y)\n elif(args.t):\n scale = int(args.t)\n\n print(\"Resizing all photos in: \" + args.d + \" to size: \" + str(x)+\"px,\"+str(y)+\"px\")\n resize_photos(args.d, x, y, scale)\n" }, { "alpha_fraction": 0.6862567663192749, "alphanum_fraction": 0.7061482667922974, "avg_line_length": 23.577777862548828, "blob_id": "c49561649fdbe36b3bff57e9bbd159abd67ec1a7", "content_id": "55455c24678e9f84d41ec2f9b1d77a17f6f5ea13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 132, "num_lines": 45, "path": "/README.md", "repo_name": "Lasyin/batch-resize", "src_encoding": "UTF-8", "text": "# batch-resize\nPython script to resize every image in a folder to a specified size.\n\n# Arguments\n<pre>\n-h or -help\n\t- List arguments and their meanings\n-s or -size\n\t- New pixel value of both width and height.\n-x or -width\n\t- New pixel value of width\n-y or -height\n\t- New pixel value of height\n-t or -scale\n\t- Scales pixel sizes\n</pre>\n<hr/>\n\n# Example Usage\n<pre>\npython batch_resize.py -d folder_name -s 128\n-> Resizes all images in 'folder_name' to 128x128px\n\npython batch_resize.py -d full/path/to/image_folder -x 128 -y 256\n-> Resizes all images in 'image_folder' (listed as a full path, do this if you're not in the current working directory) to 128x256px\n\npython batch_resize.py -d folder_name -t 2\n-> Resizes all images in 'folder_name' to twice their original size\n</pre>\n<hr />\n\n## Accepted Image Types:\n<pre>\n- Jpg, Png, Bmp (more can easily be added by editing the 'accepted_types' list in the python file)\n</pre>\n<hr />\n\n# Dependencies\n<pre>\n- Pillow, a fork of PIL.\n - Download from pip:\n - pip install Pillow\n - Link to their Github:\n - https://github.com/python-pillow/Pillow\n</pre>\n" } ]
2
snehG0205/Twitter_Mining
https://github.com/snehG0205/Twitter_Mining
07afe8b7b13a47b945edc20eaa49b1c5dae1e5de
f87d8e2b79d3762e1433fc1aedc872328b9e2a44
633add3474dda33e56bd6a75180012a746b94c82
refs/heads/master
"2021-10-19T15:35:59.450000"
"2019-02-22T06:34:45"
"2019-02-22T06:34:45"
125,955,851
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6556740999221802, "alphanum_fraction": 0.6679599285125732, "avg_line_length": 28.730770111083984, "blob_id": "aba120ef3281919e58cb14569a7e79b312674a5d", "content_id": "72195e7ea9bb9f15b3ae80a0b30ba32aa6d97706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3093, "license_type": "no_license", "max_line_length": 157, "num_lines": 104, "path": "/tweepy_tester.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "import tweepy\nimport csv\nimport pandas as pd\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\n\n####input your credentials here\nconsumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'\nconsumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'\naccess_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'\naccess_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth,wait_on_rate_limit=True)\n\n# Open/Create a file to append data\ncsvFile = open('tweets.csv', 'w+')\n# Use csv Writer\ncsvWriter = csv.writer(csvFile)\ntag = \"#DonaldTrump\"\nlimit = 0\nres = \"\"\npositive = 0\nnegative = 0\nneutral = 0\ncsvWriter.writerow([\"ID\", \"Username\", \"Twitter @\", \"Tweet\",\"Tweeted At\", \"Favourite Count\", \"Retweet Count\", \"Sentiment\"])\ncsvWriter.writerow([])\n\nfor tweet in tweepy.Cursor(api.search,q=\"\"+tag,count=350,lang=\"en\",tweet_mode = \"extended\").items():\n # print (tweet.created_at, tweet.text)\n temp = tweet.full_text\n if temp.startswith('RT @'):\n \tcontinue\n blob = TextBlob(tweet.full_text)\n if blob.sentiment.polarity > 0:\n res = \"Positive\"\n positive = positive+1\n elif blob.sentiment.polarity == 0:\n res = \"Neutral\"\n neutral = neutral+1\n else:\n res = \"Negative\"\n negative = negative+1\n\n\n print (\"ID:\", tweet.id)\n print (\"User ID:\", tweet.user.id)\n print (\"Name: \", tweet.user.name)\n print (\"Twitter @:\", tweet.user.screen_name)\n print (\"Text:\", tweet.full_text)\n print (\"Tweet length:\", len(tweet.full_text))\n print (\"Created:(UTC)\", tweet.created_at)\n print (\"Favorite Count:\", tweet.favorite_count)\n print (\"Retweet count:\", tweet.retweet_count)\n print (\"Sentiment:\", res)\n # print (\"Retweeted? 
:\", tweet.retweeted)\n # print (\"Truncated:\", tweet.truncated)\n print (\"\\n\\n\")\n \n csvWriter.writerow([tweet.id, tweet.user.name, tweet.user.screen_name, tweet.full_text,tweet.created_at, tweet.favorite_count, tweet.retweet_count, res])\n csvWriter.writerow([])\n limit = limit + 1\n if limit == 25:\n \tbreak\n\nprint (\"Done\")\n\nprint (\"\\n\\n\\n\")\ntotal = positive+negative+neutral\npositivePercent = 100*(positive/total)\nnegativePercent = 100*(negative/total)\nneutralPercent = 100*(neutral/total)\n\nprint (\"Positive tweets: {} %\".format(positivePercent))\nprint (\"Negative tweets: {} %\".format(negativePercent))\nprint (\"Neutral tweets: {} %\".format(neutralPercent))\n\n\n\n# infile = 'tweets.csv'\n\n# with open(infile, 'r') as csvfile:\n# rows = csv.reader(csvfile)\n# for row in rows:\n# sentence = row[3]\n# blob = TextBlob(sentence)\n# print (blob.sentiment)\n\n\nlabels = 'Neutral', 'Positive', 'Negative'\nsizes = []\nsizes.append(neutralPercent)\nsizes.append(positivePercent)\nsizes.append(negativePercent)\ncolors = ['lightskyblue','yellowgreen', 'lightcoral']\nexplode = (0.0, 0, 0) # explode 1st slice\n \n# Plot\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=False, startangle=140)\nplt.suptitle(\"Sentiment Analysis of {} tweets related to {}\".format(limit, tag))\nplt.axis('equal')\nplt.show()\n\n" }, { "alpha_fraction": 0.7163323760032654, "alphanum_fraction": 0.7295128703117371, "avg_line_length": 32.53845977783203, "blob_id": "2efbd8a6c914e2dfed3d51056ceee97ee5b509e6", "content_id": "2833c267efe475d95a1cef249dce1e29ac587382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 146, "num_lines": 52, "path": "/twitter1.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "import tweepy\nimport csv\nimport pandas as pd\n\n\n# keys and tokens from the Twitter Dev Console\nconsumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'\nconsumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'\naccess_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'\naccess_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'\n\n#Twitter only allows access to a users most recent 3240 tweets with this method\n\n#authorize twitter, initialize tweepy\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n#initialize a list to hold all the tweepy Tweets\nalltweets = [] \n\n#make initial request for most recent tweets (200 is the maximum allowed count)\nnew_tweets = api.search(q=\"#DonaldTrump\",count=200,tweet_mode=\"extended\")\n\n#save most recent tweets\nalltweets.extend(new_tweets)\n\n#save the id of the oldest tweet less one\n# oldest = alltweets[-1].id - 1\n#keep grabbing tweets until there are no tweets left to grab\nwhile len(new_tweets) > 0:\n # print \"getting tweets before %s\" % (oldest)\n\n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.search(q=\"#DonaldTrump\",count=200,tweet_mode=\"extended\")\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n # print \"...%s tweets downloaded so far\" % (len(alltweets))\n\n#transform the tweepy tweets into a 2D array that will populate the csv \nouttweets = [[tweet.id_str, tweet.created_at, tweet.full_tweet.encode(\"utf-8\"), 
tweet.retweet_count, tweet.favorite_count] for tweet in alltweets]\n\n#write the csv \nwith open('tweets.csv', 'w+') as f:\n\twriter = csv.writer(f)\n\twriter.writerow([\"id\",\"created_at\",\"full_text\",\"retweet_count\",\"favorite_count\"])\n\twriter.writerows(outtweets)\n\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 20.5, "blob_id": "ff395d4bb49fc3beffba7f095edc3957b177a241", "content_id": "d2d7c1f92038560521d86901783a12f74893b139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/tester.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "import csv\ncsvFile = open('res.csv', 'w+')" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 9.333333015441895, "blob_id": "09e17fa3e4c21c574b1b329407a54601c3c0300e", "content_id": "324cc41df98e99064dd5ce5c675324275e69021a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 61, "license_type": "no_license", "max_line_length": 34, "num_lines": 6, "path": "/Twitter-Flask/file.php", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "<?php \n\n$output = exec(\"python hello.py\");\n echo $output;\n\n?>" }, { "alpha_fraction": 0.7117117047309875, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 21.399999618530273, "blob_id": "9d05848fca870bc8f097dc43dbfecf368fff3dfa", "content_id": "53e4143fa77b6a317b9be860985b078dc0a822a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/Twitter-Flask/untitled.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "from test import mining\ntag = \"#WednesdayWisdom\"\nlimit = \"10\"\nsen_list = mining(tag,int(limit))\nprint(sen_list)" }, { "alpha_fraction": 0.6271721720695496, "alphanum_fraction": 0.6319115161895752, "avg_line_length": 25.41666603088379, "blob_id": "5e8e23ac66c45df58c85d54a354bc26c2a06388d", "content_id": "024802de0d6a627f6de923a455cc01cfec84070b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 124, "num_lines": 24, "path": "/Twitter-Flask/app.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request\nfrom test import mining\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n\treturn render_template('hello.html')\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef submit():\n\tif request.method == 'POST':\n\t\tprint (request.form) # debug line, see data printed below\n\t\ttag = request.form['tag']\n\t\tlimit = request.form['limit']\n\t\t# msg = tag+\" \"+limit\n\t\tsen_list = mining(tag,limit)\n\t\tmsg = \"Positive Percent = \"+sen_list[0]+\"% <br>Negative Percent = \"+sen_list[1]+\"% <br>Neutral Percent = \"+sen_list[2]+\"%\"\n\treturn \"\"+msg\n\nif __name__ == '__main__':\n app.run(debug = True)\n\nprint(\"This\")" }, { "alpha_fraction": 0.694915235042572, "alphanum_fraction": 0.694915235042572, "avg_line_length": 14, "blob_id": "7d944fc7f3f90d2c45c2be1a4cb29e3436e022bc", "content_id": "a94f6b0e01c4acb941618ccacb272702f969b61d", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 21, "num_lines": 4, "path": "/Twitter-Flask/hello.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nprint (\"some output\")\nreturn \"hello\"" }, { "alpha_fraction": 0.6503340601921082, "alphanum_fraction": 0.685968816280365, "avg_line_length": 27.125, "blob_id": "fc4f88c8ad74c39d37179b4bf4427f3d88fd17fc", "content_id": "f2fcc56e5d84fc16aa2b1fe4003396415b7b96a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/piPlotter.py", "repo_name": "snehG0205/Twitter_Mining", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n \n# Data to plot\nlabels = 'Neutral', 'Positive', 'Negative'\nsizes = [20, 40, 40]\ncolors = ['lightskyblue','yellowgreen', 'lightcoral']\nexplode = (0.0, 0, 0) # explode 1st slice\n \n# Plot\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140)\n \nplt.axis('equal')\n# plt.title('Sentiment analysis')\nplt.suptitle('Analysing n tweets related to #')\nplt.show()" } ]
8
leelongcrazy/imooc
https://github.com/leelongcrazy/imooc
e72a2d9f9a0de3cfe0937eddbf57169d4c6b2d0a
2dde9ed541fb3994ce71ac6c39e2b8fd8b012e85
83071771ce397a5079f5666c68089d4d3a8ea216
refs/heads/master
"2022-01-23T10:19:30.966000"
"2021-03-15T12:44:06"
"2021-03-15T12:44:06"
139,556,106
0
0
null
"2018-07-03T08:57:50"
"2021-03-15T12:44:20"
"2022-03-11T23:25:32"
Python
[{"alpha_fraction":0.5564435720443726,"alphanum_fraction":0.589910089969635,"avg_line_length":23.291(...TRUNCATED)
30
nopple/ctf
https://github.com/nopple/ctf
d564e8f5cae796f19e7f5b54bf67a7a3c393a976
d195ae382c2a0e260aed66ceb17c6387436ef730
d7b769740220641e9fe020c2c60a5c13fe602390
refs/heads/master
"2016-09-05T14:02:54.899000"
"2014-05-21T22:50:59"
"2014-05-21T22:50:59"
19,962,980
1
1
null
null
null
null
null
[{"alpha_fraction":0.7200000286102295,"alphanum_fraction":0.7200000286102295,"avg_line_length":11.5,(...TRUNCATED)
5