Using a simple English question-answering task as an example, this post tests several of the seq2seq interfaces from the seq2seq module in TensorFlow 1.4's tf.contrib.legacy_seq2seq.

GitHub: https://github.com/buyizhiyou/tf_seq2seq
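
All four scripts below import build_vocab, sent2idx, idx2sent, and show_loss from the repo's utils module, which is not reproduced here. The following is a hypothetical reconstruction of those helpers, inferred only from how they are called in the scripts; the repo's actual utils.py may differ.

# Hypothetical sketch of the helpers used below; names and behavior are
# inferred from the call sites, not copied from the repo's utils.py.
import re
import matplotlib.pyplot as plt

def tokenize(sentence):
    # Lowercased word/punctuation tokens, e.g. 'Hi What is your name?' -> ['hi', 'what', ...]
    return re.findall(r"\w+|[^\s\w]", sentence.lower())

def build_vocab(sentences, is_target=False):
    # word2idx / idx2word maps; targets reserve an extra id for the GO symbol.
    words = sorted({w for s in sentences for w in tokenize(s)})
    start = 2 if is_target else 1          # 0: PAD (and 1: GO for targets)
    word2idx = {w: i + start for i, w in enumerate(words)}
    idx2word = {i: w for w, i in word2idx.items()}
    return word2idx, idx2word, len(words)

def sent2idx(sentence, vocab, max_sentence_length, is_target=False):
    # Pad a sentence to a fixed length; targets are prefixed with GO (id 1).
    ids = [vocab[w] for w in tokenize(sentence)]
    if is_target:
        return [1] + ids + [0] * (max_sentence_length - len(ids))
    return ids + [0] * (max_sentence_length - len(ids)), len(ids)

def idx2sent(indices, reverse_vocab):
    return ' '.join(reverse_vocab.get(int(i), '') for i in indices)

def show_loss(loss_history):
    plt.plot(loss_history)
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.show()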

Testing the basic_rnn_seq2seq interface

#-*-coding:utf8-*-

__author__ = "buyizhiyou"
__date__ = "2018-7-30"

import os
import pdb
import re
from collections import Counter

import matplotlib.pyplot as plt
import tensorflow as tf

from seq2seq import basic_rnn_seq2seq
from utils import *

os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs, run on CPU

input_batches = [
    ['Hi What is your name?', 'Nice to meet you!'],
    ['Which programming language do you use?', 'See you later.'],
    ['Where do you live?', 'What is your major?'],
    ['What do you want to drink?', 'What is your favorite beer?']]
target_batches = [
    ['Hi this is Jaemin.', 'Nice to meet you too!'],
    ['I like Python.', 'Bye Bye.'],
    ['I live in Seoul, South Korea.', 'I study industrial engineering.'],
    ['Beer please!', 'Leffe brown!']]

all_input_sentences = []
for input_batch in input_batches:
    all_input_sentences.extend(input_batch)
all_target_sentences = []
for target_batch in target_batches:
    all_target_sentences.extend(target_batch)

# enc_vocab: word2idx, enc_reverse_vocab: idx2word, enc_vocab_size: 26
enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(all_input_sentences)
# dec_vocab: word2idx, dec_reverse_vocab: idx2word, dec_vocab_size: 28
dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(all_target_sentences, is_target=True)

# hyperparameters
n_epoch = 2000
hidden_size = 50
enc_emb_size = 20
dec_emb_size = 21
enc_sentence_length = 10
dec_sentence_length = 11

enc_inputs = tf.placeholder(tf.int32, shape=[None, enc_sentence_length], name='input_sentences')
sequence_lengths = tf.placeholder(tf.int32, shape=[None], name='sentences_length')
dec_inputs = tf.placeholder(tf.int32, shape=[None, dec_sentence_length+1], name='output_sentences')

# time-major: [sentence_length, batch_size]
enc_inputs_t = tf.transpose(enc_inputs, perm=[1, 0])
dec_inputs_t = tf.transpose(dec_inputs, perm=[1, 0])

'''
embedding
'''
enc_Wemb = tf.get_variable('enc_word_emb', initializer=tf.random_uniform([enc_vocab_size+1, enc_emb_size]))
dec_Wemb = tf.get_variable('dec_word_emb', initializer=tf.random_uniform([dec_vocab_size+2, dec_emb_size]))
enc_emb_inputs = tf.nn.embedding_lookup(enc_Wemb, enc_inputs_t)
dec_emb_inputs = tf.nn.embedding_lookup(dec_Wemb, dec_inputs_t)
# enc_emb_inputs: list(enc_sent_len) of tensor[batch_size x embedding_size]
# Because `static_rnn` takes list inputs
enc_emb_inputs = tf.unstack(enc_emb_inputs)
dec_emb_inputs = tf.unstack(dec_emb_inputs)

cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
dec_outputs, state = basic_rnn_seq2seq(enc_emb_inputs, dec_emb_inputs, cell)
dec_outputs = tf.stack(dec_outputs)
logits = tf.layers.dense(dec_outputs, units=dec_vocab_size+2, activation=tf.nn.relu)  # fully connected layer
predictions = tf.argmax(logits, axis=2)
predictions = tf.transpose(predictions, [1, 0])

# labels & logits: [dec_sentence_length+1 x batch_size x dec_vocab_size+2]
labels = tf.one_hot(dec_inputs_t, dec_vocab_size+2)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=labels, logits=logits))
# training_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
training_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_history = []
    for epoch in range(n_epoch):
        all_preds = []
        epoch_loss = 0
        for input_batch, target_batch in zip(input_batches, target_batches):
            input_token_indices = []
            target_token_indices = []
            sentence_lengths = []
            # convert each sentence to a padded list of token ids
            for input_sent in input_batch:
                input_sent, sent_len = sent2idx(input_sent, vocab=enc_vocab, max_sentence_length=enc_sentence_length)
                input_token_indices.append(input_sent)
                sentence_lengths.append(sent_len)
            for target_sent in target_batch:
                target_token_indices.append(sent2idx(target_sent, vocab=dec_vocab, max_sentence_length=dec_sentence_length, is_target=True))
            batch_preds, batch_loss, _ = sess.run(
                [predictions, loss, training_op],
                feed_dict={
                    enc_inputs: input_token_indices,
                    sequence_lengths: sentence_lengths,
                    dec_inputs: target_token_indices
                })
            loss_history.append(batch_loss)
            epoch_loss += batch_loss
            all_preds.append(batch_preds)
        # Logging every 400 epochs
        if epoch % 400 == 0:
            print('Epoch', epoch)
            for input_batch, target_batch, batch_preds in zip(input_batches, target_batches, all_preds):
                for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
                    print('\t', input_sent)
                    print('\t => ', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
                    print('\tCorrect answer:', target_sent)
            print('\tepoch loss: {:.2f}\n'.format(epoch_loss))

show_loss(loss_history)
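
basic_rnn_seq2seq runs a static RNN over the already-embedded encoder inputs, uses the final encoder state to initialize the decoder, and feeds the ground-truth decoder inputs at every step (teacher forcing). A minimal standalone sketch with made-up toy sizes (my own example, calling the same function via tf.contrib.legacy_seq2seq rather than the repo's local seq2seq.py) just to show what it returns:

# Toy shape check (assumed sizes, not from the repo): basic_rnn_seq2seq returns
# one decoder output per decoder time step plus the final decoder state.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import basic_rnn_seq2seq

batch, enc_steps, dec_steps, emb, hidden = 2, 4, 5, 20, 50
enc = [tf.random_normal([batch, emb]) for _ in range(enc_steps)]  # list of [batch, emb]
dec = [tf.random_normal([batch, emb]) for _ in range(dec_steps)]
cell = tf.nn.rnn_cell.BasicRNNCell(hidden)

outputs, state = basic_rnn_seq2seq(enc, dec, cell)
print(len(outputs), outputs[0].shape, state.shape)  # 5 (2, 50) (2, 50)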

Testing the tied_rnn_seq2seq interface (the encoder and decoder share parameters)
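
Because tied_rnn_seq2seq reuses the same cell variables for the encoder and the decoder, the encoder and decoder inputs must have the same last dimension, which is why dec_emb_size is set to 20 here instead of 21. Before the full script, a quick hedged check (toy sizes, my own sketch rather than repo code) that the sharing actually happens:

# Compare the trainable variables created by basic_rnn_seq2seq (separate encoder
# and decoder cells) with tied_rnn_seq2seq (a single shared cell). Sizes are toy
# assumptions; in TF 1.4 this should print two kernel/bias pairs for the basic
# version and a single shared pair for the tied version.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import basic_rnn_seq2seq, tied_rnn_seq2seq

def list_vars(seq2seq_fn):
    graph = tf.Graph()
    with graph.as_default():
        enc = [tf.random_normal([2, 20]) for _ in range(3)]
        dec = [tf.random_normal([2, 20]) for _ in range(4)]
        cell = tf.nn.rnn_cell.BasicRNNCell(50)
        seq2seq_fn(enc, dec, cell)
        return [v.name for v in tf.trainable_variables()]

print(list_vars(basic_rnn_seq2seq))  # encoder and decoder each get their own weights
print(list_vars(tied_rnn_seq2seq))   # one set of cell weights shared by both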

#-*-coding:utf8-*-

__author__ = "buyizhiyou"
__date__ = "2018-7-30"

import os
import pdb
import re
from collections import Counter

import matplotlib.pyplot as plt
import tensorflow as tf

from seq2seq import tied_rnn_seq2seq
from utils import *

os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs, run on CPU

input_batches = [
    ['Hi What is your name?', 'Nice to meet you!'],
    ['Which programming language do you use?', 'See you later.'],
    ['Where do you live?', 'What is your major?'],
    ['What do you want to drink?', 'What is your favorite beer?']]
target_batches = [
    ['Hi this is Jaemin.', 'Nice to meet you too!'],
    ['I like Python.', 'Bye Bye.'],
    ['I live in Seoul, South Korea.', 'I study industrial engineering.'],
    ['Beer please!', 'Leffe brown!']]

all_input_sentences = []
for input_batch in input_batches:
    all_input_sentences.extend(input_batch)
all_target_sentences = []
for target_batch in target_batches:
    all_target_sentences.extend(target_batch)

# enc_vocab: word2idx, enc_reverse_vocab: idx2word, enc_vocab_size: 26
enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(all_input_sentences)
# dec_vocab: word2idx, dec_reverse_vocab: idx2word, dec_vocab_size: 28
dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(all_target_sentences, is_target=True)

# hyperparameters
n_epoch = 2000
hidden_size = 50
enc_emb_size = 20
dec_emb_size = 20  # must be consistent with enc_emb_size because encoder and decoder share the cell
enc_sentence_length = 10
dec_sentence_length = 11

enc_inputs = tf.placeholder(tf.int32, shape=[None, enc_sentence_length], name='input_sentences')
sequence_lengths = tf.placeholder(tf.int32, shape=[None], name='sentences_length')
dec_inputs = tf.placeholder(tf.int32, shape=[None, dec_sentence_length+1], name='output_sentences')

# time-major: [sentence_length, batch_size]
enc_inputs_t = tf.transpose(enc_inputs, perm=[1, 0])
dec_inputs_t = tf.transpose(dec_inputs, perm=[1, 0])

'''
embedding
'''
enc_Wemb = tf.get_variable('enc_word_emb', initializer=tf.random_uniform([enc_vocab_size+1, enc_emb_size]))
dec_Wemb = tf.get_variable('dec_word_emb', initializer=tf.random_uniform([dec_vocab_size+2, dec_emb_size]))
enc_emb_inputs = tf.nn.embedding_lookup(enc_Wemb, enc_inputs_t)
dec_emb_inputs = tf.nn.embedding_lookup(dec_Wemb, dec_inputs_t)
# enc_emb_inputs: list(enc_sent_len) of tensor[batch_size x embedding_size]
# Because `static_rnn` takes list inputs
enc_emb_inputs = tf.unstack(enc_emb_inputs)
dec_emb_inputs = tf.unstack(dec_emb_inputs)

cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
dec_outputs, state = tied_rnn_seq2seq(enc_emb_inputs, dec_emb_inputs, cell)
dec_outputs = tf.stack(dec_outputs)
logits = tf.layers.dense(dec_outputs, units=dec_vocab_size+2, activation=tf.nn.relu)  # fully connected layer
predictions = tf.argmax(logits, axis=2)
predictions = tf.transpose(predictions, [1, 0])

# labels & logits: [dec_sentence_length+1 x batch_size x dec_vocab_size+2]
labels = tf.one_hot(dec_inputs_t, dec_vocab_size+2)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=labels, logits=logits))
# training_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
training_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_history = []
    for epoch in range(n_epoch):
        all_preds = []
        epoch_loss = 0
        for input_batch, target_batch in zip(input_batches, target_batches):
            input_token_indices = []
            target_token_indices = []
            sentence_lengths = []
            # convert each sentence to a padded list of token ids
            for input_sent in input_batch:
                input_sent, sent_len = sent2idx(input_sent, vocab=enc_vocab, max_sentence_length=enc_sentence_length)
                input_token_indices.append(input_sent)
                sentence_lengths.append(sent_len)
            for target_sent in target_batch:
                target_token_indices.append(sent2idx(target_sent, vocab=dec_vocab, max_sentence_length=dec_sentence_length, is_target=True))
            batch_preds, batch_loss, _ = sess.run(
                [predictions, loss, training_op],
                feed_dict={
                    enc_inputs: input_token_indices,
                    sequence_lengths: sentence_lengths,
                    dec_inputs: target_token_indices
                })
            loss_history.append(batch_loss)
            epoch_loss += batch_loss
            all_preds.append(batch_preds)
        # Logging every 400 epochs
        if epoch % 400 == 0:
            print('Epoch', epoch)
            for input_batch, target_batch, batch_preds in zip(input_batches, target_batches, all_preds):
                for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
                    print('\t', input_sent)
                    print('\t => ', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
                    print('\tCorrect answer:', target_sent)
            print('\tepoch loss: {:.2f}\n'.format(epoch_loss))

show_loss(loss_history)

Testing the embedding_attention_seq2seq interface

#-*-coding:utf8-*-

__author__ = "buyizhiyou"
__date__ = "2018-7-30"

import os
import pdb
import re
from collections import Counter

import matplotlib.pyplot as plt
import tensorflow as tf

from seq2seq import embedding_attention_seq2seq
from utils import *

os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs, run on CPU

input_batches = [
    ['Hi What is your name?', 'Nice to meet you!'],
    ['Which programming language do you use?', 'See you later.'],
    ['Where do you live?', 'What is your major?'],
    ['What do you want to drink?', 'What is your favorite beer?']]
target_batches = [
    ['Hi this is Jaemin.', 'Nice to meet you too!'],
    ['I like Python.', 'Bye Bye.'],
    ['I live in Seoul, South Korea.', 'I study industrial engineering.'],
    ['Beer please!', 'Leffe brown!']]

all_input_sentences = []
for input_batch in input_batches:
    all_input_sentences.extend(input_batch)
all_target_sentences = []
for target_batch in target_batches:
    all_target_sentences.extend(target_batch)

# enc_vocab: word2idx, enc_reverse_vocab: idx2word, enc_vocab_size: 26
enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(all_input_sentences)
# dec_vocab: word2idx, dec_reverse_vocab: idx2word, dec_vocab_size: 28
dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(all_target_sentences, is_target=True)

# hyperparameters
n_epoch = 2000
hidden_size = 50
enc_emb_size = 20
dec_emb_size = 21
enc_sentence_length = 10
dec_sentence_length = 11

enc_inputs = tf.placeholder(tf.int32, shape=[None, enc_sentence_length], name='input_sentences')
sequence_lengths = tf.placeholder(tf.int32, shape=[None], name='sentences_length')
dec_inputs = tf.placeholder(tf.int32, shape=[None, dec_sentence_length+1], name='output_sentences')

# time-major: [sentence_length, batch_size]
enc_inputs_t = tf.transpose(enc_inputs, perm=[1, 0])
dec_inputs_t = tf.transpose(dec_inputs, perm=[1, 0])
# labels & logits: [dec_sentence_length+1 x batch_size x dec_vocab_size+2]
labels = tf.one_hot(dec_inputs_t, dec_vocab_size+2)

# enc_inputs_t / dec_inputs_t: lists of [batch_size] int32 tensors,
# because the legacy seq2seq interfaces take list inputs
enc_inputs_t = tf.unstack(enc_inputs_t)
dec_inputs_t = tf.unstack(dec_inputs_t)

cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
dec_outputs, state = embedding_attention_seq2seq(
    encoder_inputs=enc_inputs_t,
    decoder_inputs=dec_inputs_t,
    cell=cell,
    num_encoder_symbols=enc_vocab_size+1,
    num_decoder_symbols=dec_vocab_size+2,
    embedding_size=enc_emb_size,
    output_projection=None,
    feed_previous=True
)
logits = tf.stack(dec_outputs)
predictions = tf.argmax(logits, axis=2)
predictions = tf.transpose(predictions, [1, 0])

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=labels, logits=logits))
# training_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
training_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_history = []
    for epoch in range(n_epoch):
        all_preds = []
        epoch_loss = 0
        for input_batch, target_batch in zip(input_batches, target_batches):
            input_token_indices = []
            target_token_indices = []
            sentence_lengths = []
            # convert each sentence to a padded list of token ids
            for input_sent in input_batch:
                input_sent, sent_len = sent2idx(input_sent, vocab=enc_vocab, max_sentence_length=enc_sentence_length)
                input_token_indices.append(input_sent)
                sentence_lengths.append(sent_len)
            for target_sent in target_batch:
                target_token_indices.append(sent2idx(target_sent, vocab=dec_vocab, max_sentence_length=dec_sentence_length, is_target=True))
            batch_preds, batch_loss, _ = sess.run(
                [predictions, loss, training_op],
                feed_dict={
                    enc_inputs: input_token_indices,
                    sequence_lengths: sentence_lengths,
                    dec_inputs: target_token_indices
                })
            loss_history.append(batch_loss)
            epoch_loss += batch_loss
            all_preds.append(batch_preds)
        # Logging every 400 epochs
        if epoch % 400 == 0:
            print('Epoch', epoch)
            for input_batch, target_batch, batch_preds in zip(input_batches, target_batches, all_preds):
                for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
                    print('\t', input_sent)
                    print('\t => ', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
                    print('\tCorrect answer:', target_sent)
            print('\tepoch loss: {:.2f}\n'.format(epoch_loss))

show_loss(loss_history)
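
Two things differ from the first two scripts: embedding_attention_seq2seq takes raw integer token lists and builds the embeddings internally, and with output_projection=None its outputs are already projected to num_decoder_symbols, so they can be used as logits directly (no extra dense layer). Also note that feed_previous=True makes the decoder feed back its own previous prediction instead of the ground-truth dec_inputs. A minimal shape sketch with toy sizes (my own example, not repo code):

# Toy shape check (assumed sizes): inputs are lists of int32 id tensors, and each
# decoder output already has num_decoder_symbols units, i.e. ready-made logits.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import embedding_attention_seq2seq

enc = [tf.constant([3, 7]) for _ in range(4)]  # 4 encoder steps, batch of 2
dec = [tf.constant([1, 2]) for _ in range(5)]  # 5 decoder steps (GO + 4 tokens)
cell = tf.nn.rnn_cell.BasicRNNCell(50)

outputs, state = embedding_attention_seq2seq(
    encoder_inputs=enc, decoder_inputs=dec, cell=cell,
    num_encoder_symbols=27, num_decoder_symbols=30,
    embedding_size=20, feed_previous=True)
print(len(outputs), outputs[0].shape)  # 5 (2, 30)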

Testing the embedding_rnn_seq2seq interface
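
embedding_rnn_seq2seq has the same calling convention as embedding_attention_seq2seq, just without the attention mechanism. One aside before the script (a hedged sketch with toy sizes, not repo code): feed_previous does not have to be a Python bool; it can also be a scalar boolean tensor, so a single graph can switch between teacher forcing for training and feeding back its own predictions for inference.

# Hedged sketch (toy sizes): drive feed_previous from a placeholder so the same
# graph does teacher forcing (False) during training and greedy feed-back (True)
# at inference time.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import embedding_rnn_seq2seq

enc = [tf.constant([3, 7]) for _ in range(4)]
dec = [tf.constant([1, 2]) for _ in range(5)]
cell = tf.nn.rnn_cell.BasicRNNCell(50)
use_prev = tf.placeholder_with_default(False, shape=[], name='feed_previous')

outputs, state = embedding_rnn_seq2seq(
    encoder_inputs=enc, decoder_inputs=dec, cell=cell,
    num_encoder_symbols=27, num_decoder_symbols=30,
    embedding_size=20, feed_previous=use_prev)
# sess.run(outputs, feed_dict={use_prev: True}) would decode greedily.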

#-*-coding:utf8-*-

__author__ = "buyizhiyou"
__date__ = "2018-7-30"

'''
Test the embedding_rnn_seq2seq function
'''

import os
import pdb
import re
from collections import Counter

import matplotlib.pyplot as plt
import tensorflow as tf

from seq2seq import embedding_rnn_seq2seq
from utils import *

os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs, run on CPU

input_batches = [
    ['Hi What is your name?', 'Nice to meet you!'],
    ['Which programming language do you use?', 'See you later.'],
    ['Where do you live?', 'What is your major?'],
    ['What do you want to drink?', 'What is your favorite beer?']]
target_batches = [
    ['Hi this is Jaemin.', 'Nice to meet you too!'],
    ['I like Python.', 'Bye Bye.'],
    ['I live in Seoul, South Korea.', 'I study industrial engineering.'],
    ['Beer please!', 'Leffe brown!']]

all_input_sentences = []
for input_batch in input_batches:
    all_input_sentences.extend(input_batch)
all_target_sentences = []
for target_batch in target_batches:
    all_target_sentences.extend(target_batch)

# enc_vocab: word2idx, enc_reverse_vocab: idx2word, enc_vocab_size: 26
enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(all_input_sentences)
# dec_vocab: word2idx, dec_reverse_vocab: idx2word, dec_vocab_size: 28
dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(all_target_sentences, is_target=True)

# hyperparameters
n_epoch = 2000
hidden_size = 50
enc_emb_size = 20
dec_emb_size = 21
enc_sentence_length = 10
dec_sentence_length = 11

enc_inputs = tf.placeholder(tf.int32, shape=[None, enc_sentence_length], name='input_sentences')
sequence_lengths = tf.placeholder(tf.int32, shape=[None], name='sentences_length')
dec_inputs = tf.placeholder(tf.int32, shape=[None, dec_sentence_length+1], name='output_sentences')

# time-major: [sentence_length, batch_size]
enc_inputs_t = tf.transpose(enc_inputs, perm=[1, 0])
dec_inputs_t = tf.transpose(dec_inputs, perm=[1, 0])
# labels & logits: [dec_sentence_length+1 x batch_size x dec_vocab_size+2]
labels = tf.one_hot(dec_inputs_t, dec_vocab_size+2)

# enc_inputs_t / dec_inputs_t: lists of [batch_size] int32 tensors,
# because the legacy seq2seq interfaces take list inputs
enc_inputs_t = tf.unstack(enc_inputs_t)
dec_inputs_t = tf.unstack(dec_inputs_t)

cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
dec_outputs, state = embedding_rnn_seq2seq(
    encoder_inputs=enc_inputs_t,
    decoder_inputs=dec_inputs_t,
    cell=cell,
    num_encoder_symbols=enc_vocab_size+1,
    num_decoder_symbols=dec_vocab_size+2,
    embedding_size=enc_emb_size,
    output_projection=None,
    feed_previous=True
)
logits = tf.stack(dec_outputs)
predictions = tf.argmax(logits, axis=2)
predictions = tf.transpose(predictions, [1, 0])

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=labels, logits=logits))
# training_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
training_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_history = []
    for epoch in range(n_epoch):
        all_preds = []
        epoch_loss = 0
        for input_batch, target_batch in zip(input_batches, target_batches):
            input_token_indices = []
            target_token_indices = []
            sentence_lengths = []
            # convert each sentence to a padded list of token ids
            for input_sent in input_batch:
                input_sent, sent_len = sent2idx(input_sent, vocab=enc_vocab, max_sentence_length=enc_sentence_length)
                input_token_indices.append(input_sent)
                sentence_lengths.append(sent_len)
            for target_sent in target_batch:
                target_token_indices.append(sent2idx(target_sent, vocab=dec_vocab, max_sentence_length=dec_sentence_length, is_target=True))
            batch_preds, batch_loss, _ = sess.run(
                [predictions, loss, training_op],
                feed_dict={
                    enc_inputs: input_token_indices,
                    sequence_lengths: sentence_lengths,
                    dec_inputs: target_token_indices
                })
            loss_history.append(batch_loss)
            epoch_loss += batch_loss
            all_preds.append(batch_preds)
        # Logging every 400 epochs
        if epoch % 400 == 0:
            print('Epoch', epoch)
            for input_batch, target_batch, batch_preds in zip(input_batches, target_batches, all_preds):
                for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
                    print('\t', input_sent)
                    print('\t => ', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
                    print('\tCorrect answer:', target_sent)
            print('\tepoch loss: {:.2f}\n'.format(epoch_loss))

show_loss(loss_history)
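
All four scripts compute the loss as softmax cross-entropy against one-hot labels over every time step, which also penalizes padded positions. As a hedged alternative (my own sketch with toy shapes, not what the scripts above use), tf.contrib.legacy_seq2seq also provides sequence_loss, which takes per-step weights so padding can be masked out:

# sequence_loss expects lists (one entry per decoder step) of logits, int targets
# and float weights; setting a weight to 0.0 removes that position from the loss.
# Shapes below are toy assumptions, not taken from the scripts above.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import sequence_loss

dec_steps, batch, vocab = 3, 2, 30
step_logits = [tf.random_normal([batch, vocab]) for _ in range(dec_steps)]
step_targets = [tf.constant([4, 5]) for _ in range(dec_steps)]
step_weights = [tf.constant([1.0, 0.0]) for _ in range(dec_steps)]  # second sample padded out

loss = sequence_loss(step_logits, step_targets, step_weights)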
