I have been following Morvan (莫烦)'s TensorFlow tutorials these past few days and just reached transfer learning (for an introduction to transfer learning, see this post). Morvan's example learns to predict the body size of cats and tigers. As an ambitious student, I was certainly not going to predict animal sizes again; I had previously done image classification on the cats-vs-dogs dataset, the prepared data was still on hand, so without further ado, let's get started.

Since we are using the VGG16 model, the model code comes first:

def conv_layers_simple_api(net_in):
    with tf.name_scope('preprocess'):
        # Notice that we include a preprocessing layer that takes the RGB image
        # with pixel values in the range of 0-255 and subtracts the mean image
        # values (calculated over the entire ImageNet training set).
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
        net_in.outputs = net_in.outputs - mean
    # conv1
    network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv1_1')
    network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv1_2')
    network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
    # conv2
    network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv2_1')
    network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv2_2')
    network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
    # conv3
    network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv3_1')
    network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv3_2')
    network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv3_3')
    network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
    # conv4
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv4_1')
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv4_2')
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv4_3')
    network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
    # conv5
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv5_1')
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv5_2')
    network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,
                     padding='SAME', name='conv5_3')
    network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5')
    return network

To save myself some work I used the VGG16 model from the TensorLayer library directly; for an introduction to TensorLayer, see here.

Following Morvan's tutorial, I rewrote the final fully connected layers for binary classification:

def fc_layers(net):
    # flatten before the fully connected layers
    network = FlattenLayer(net, name='flatten')
    # tf.layers.dense(self.flatten, 256, tf.nn.relu, name='fc6')
    network = DenseLayer(network, n_units=256, act=tf.nn.relu, name='fc1_relu')
    # network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc2_relu')
    # self.out = tf.layers.dense(self.fc6, 1, name='out')
    network = DenseLayer(network, n_units=2, act=tf.identity, name='fc3_relu')
    return network
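A quick aside on that two-unit output: it pairs with the integer-label loss defined in the next block. As a rough illustration of my own (not part of the original script), tl.cost.cross_entropy on integer labels should behave like TensorFlow's sparse softmax cross-entropy over the two logits, whereas Morvan's network ends in a single unit because it regresses body size rather than classifying:

import tensorflow as tf

# hedged sketch: plain-TF equivalent of a 2-logit classifier loss with integer labels
logits = tf.placeholder(tf.float32, [None, 2])   # plays the role of y below
labels = tf.placeholder(tf.int32, [None])        # plays the role of y_
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))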

Define the inputs, the labels, the loss function, and the training step:

# input
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
# labels
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
net_in = InputLayer(x, name='input')
# net_cnn = conv_layers(net_in)  # professional CNN APIs
net_cnn = conv_layers_simple_api(net_in)  # simplified CNN APIs
network = fc_layers(net_cnn)
y = network.outputs
# probs = tf.nn.softmax(y)
y_op = tf.argmax(tf.nn.softmax(y), 1)
cost = tl.cost.cross_entropy(y, y_, name='cost')
correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.float32), tf.cast(y_, tf.float32))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# define the optimizer: only the fully connected layers are trained (the 26 conv W/b arrays come first)
train_params = network.all_params[26:]
# print(train_params)
global_step = tf.Variable(0)
# -------------- learning-rate schedule (exponential decay); staircase=True floors global_step/decay_steps to an integer --------------
# learning_rate = tf.train.exponential_decay(1e-2, global_step, decay_steps=1000, decay_rate=0.98, staircase=True)
train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
                                  epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
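The exponential-decay schedule is left commented out above. As a minimal sketch of my own (reusing cost, train_params and global_step from the block above, and keeping a 1e-4 base rate), wiring it in would look roughly like this; passing global_step to minimize() lets the optimizer advance the counter so the decay actually takes effect:

learning_rate = tf.train.exponential_decay(1e-4, global_step,
                                           decay_steps=1000, decay_rate=0.98,
                                           staircase=True)  # step the rate down every 1000 batches
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
    cost, var_list=train_params, global_step=global_step)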

Read the training and validation data, and load the pretrained model parameters:

img, label = read_and_decode("F:\\001-python\\train.tfrecords")
img_v, label_v = read_and_decode("F:\\001-python\\val.tfrecords")
# shuffle_batch randomly shuffles the input samples
X_train, y_train = tf.train.shuffle_batch([img, label],
                                          batch_size=30, capacity=400,
                                          min_after_dequeue=300)
X_Val, y_val = tf.train.shuffle_batch([img_v, label_v],
                                      batch_size=30, capacity=400,
                                      min_after_dequeue=300)
tl.layers.initialize_global_variables(sess)
network.print_params()
network.print_layers()
npz = np.load('vgg16_weights.npz')
params = []
for val in sorted(npz.items())[0:26]:  # the 13 conv layers contribute 26 arrays (a W and a b each)
    # print(" Loading %s" % str(val[1].shape))
    params.append(val[1])
# load the pretrained parameters into the conv layers
tl.files.assign_params(sess, params, network)
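As a quick sanity check (my own addition, assuming the widely used vgg16_weights.npz layout with keys such as conv1_1_W, conv1_1_b, ..., fc8_b), printing the sorted keys shows that the first 26 arrays are exactly the conv weights and biases, which is also why the freshly trainable parameters start at network.all_params[26:]:

import numpy as np

npz = np.load('vgg16_weights.npz')
for i, (key, arr) in enumerate(sorted(npz.items())):
    print(i, key, arr.shape)   # indices 0-25 should be conv1_1_W ... conv5_3_b; fc6_W follows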

With the weights loaded, start training for 200 epochs:

for epoch in range(n_epoch):
    start_time = time.time()
    val, l = sess.run([X_train, y_train])
    for X_train_a, y_train_a in tl.iterate.minibatches(val, l, batch_size, shuffle=True):
        sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})
    if epoch + 1 == 1 or (epoch + 1) % 5 == 0:
        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
        train_loss, train_acc, n_batch = 0, 0, 0
        for X_train_a, y_train_a in tl.iterate.minibatches(val, l, batch_size, shuffle=True):
            err, ac = sess.run([cost, acc], feed_dict={x: X_train_a, y_: y_train_a})
            train_loss += err
            train_acc += ac
            n_batch += 1
        print(" train loss: %f" % (train_loss / n_batch))
        print(" train acc: %f" % (train_acc / n_batch))

Save the trained parameters:

 tl.files.save_npz(network.all_params, name='model.npz', sess=sess)

Now the training begins. I happily let my laptop's GPU churn through a full run:

~~~~~~~~~~~~~~~~~~~~~~~~ and then came the long wait

.......
[TL] Epoch 138 of 150 took 0.999402s
[TL] val loss: 0.687194
[TL] val acc: 0.562500
[TL] Epoch 140 of 150 took 3.782207s
[TL] val loss: 0.619966
[TL] val acc: 0.750000
[TL] Epoch 142 of 150 took 0.983802s
[TL] val loss: 0.685686
[TL] val acc: 0.562500
[TL] Epoch 144 of 150 took 0.986604s
[TL] val loss: 0.661224
[TL] val acc: 0.687500
[TL] Epoch 146 of 150 took 1.022403s
[TL] val loss: 0.675885
[TL] val acc: 0.687500
[TL] Epoch 148 of 150 took 0.991802s
[TL] val loss: 0.682124
[TL] val acc: 0.625000
[TL] Epoch 150 of 150 took 3.487811s
[TL] val loss: 0.674932
[TL] val acc: 0.687500
[TL] Total training time: 319.859640s
[TL] [*] model.npz saved

Uh~~~~~~~~~~~~~~~~~

An accuracy of 0.68. A friend in my chat group took one look and said: that's barely better than guessing (awkward silence). So where was the problem? Could it be that I simply hadn't trained for enough epochs? Morvan got good results in only 100.

Either way, it was worth a try, so I loaded the freshly saved model.npz parameters and ran another 100 epochs.
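The reload itself is not shown above; as a minimal sketch (assuming the graph is rebuilt exactly as before, and that TensorLayer's load_and_assign_npz is the counterpart of save_npz), it would look something like:

# rebuild x, y_, conv_layers_simple_api + fc_layers first, then:
tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)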

~~~~~~~~~~~~~~~~~~~~~~~~ another long wait

[TL] Epoch 1 of 100 took 8.477617s
[TL] val loss: 0.685957
[TL] val acc: 0.562500
[TL] Epoch 2 of 100 took 0.999402s
[TL] val loss: 0.661529
[TL] val acc: 0.625000
......
[TL] Epoch 94 of 100 took 0.992208s
[TL] val loss: 0.708815
[TL] val acc: 0.562500
[TL] Epoch 96 of 100 took 0.998406s
[TL] val loss: 0.710636
[TL] val acc: 0.562500
[TL] Epoch 98 of 100 took 0.992807s
[TL] val loss: 0.621505
[TL] val acc: 0.687500
[TL] Epoch 100 of 100 took 0.986405s
[TL] val loss: 0.670647
[TL] val acc: 0.625000
[TL] Total training time: 156.734633s
[TL] [*] model.npz saved

Oh, come on. That was even worse than before.

I sank into deep thought: was it caused by changing the fully connected layers? So I added back the layer I had removed earlier:

def fc_layers(net):
    # flatten before the fully connected layers
    network = FlattenLayer(net, name='flatten')
    # tf.layers.dense(self.flatten, 256, tf.nn.relu, name='fc6')
    network = DenseLayer(network, n_units=256, act=tf.nn.relu, name='fc1_relu')
    network = DenseLayer(network, n_units=256, act=tf.nn.relu, name='fc2_relu')
    # self.out = tf.layers.dense(self.fc6, 1, name='out')
    network = DenseLayer(network, n_units=2, act=tf.identity, name='fc3_relu')
    return network

Then train again.

~~~~~~~~~~~~~~~~~~~~~~~~ yet another long wait

 [TL] Epoch 1 of 100 took 8.477229s
[TL] val loss: 2.370650
[TL] val acc: 0.562500
...
[TL] Epoch 100 of 100 took 1.016002s
[TL] val loss: 0.762171
[TL] val acc: 0.437500
[TL] Total training time: 156.836465s
[TL] [*] model.npz saved

Same story. By this point I was losing it; something had to be wrong. So I went back through Morvan's code and checked it line by line. The layer parameters were definitely identical, so the problem had to be in the training setup.

self.train_op = tf.train.RMSPropOptimizer(0.001).minimize(self.loss)  # Morvan's code
train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
                                  epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)  # mine

Then I noticed train_params: could that be it? I was optimizing only the final fully connected layers, while Morvan optimized all of the parameters.

It was already late at night, but I decided to try it even if it cost me my sleep, so I changed it to:

# define the optimizer: train all parameters this time
train_params = network.all_params
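As an aside, whether you train the whole network or only the new head, slicing all_params by index is fragile if the layer count changes. A hedged alternative of my own, using TensorLayer's name-based lookup to grab just the dense layers defined in fc_layers:

# select only the variables of the fc* DenseLayers by name
train_params = tl.layers.get_variables_with_name('fc', train_only=True, printable=False)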
~~~~~~~~~~~~~~~~~~~~~~~~ and once more, a long wait

[TL] Epoch 1 of 100 took 20.286640s
[TL] val loss: 11.938850
[TL] val acc: 0.312500
[TL] Epoch 2 of 100 took 3.091806s
[TL] val loss: 2.890055
[TL] val acc: 0.625000
[TL] Epoch 4 of 100 took 3.074205s
[TL] val loss: 24.055895
[TL] val acc: 0.687500
[TL] ....
[TL] val loss: 0.699907
[TL] val acc: 0.500000
[TL] Epoch 98 of 100 took 3.089206s
[TL] val loss: 0.683627
[TL] val acc: 0.562500
[TL] Epoch 100 of 100 took 3.091806s
[TL] val loss: 0.708496
[TL] val acc: 0.562500
[TL] Total training time: 375.727307s
[TL] [*] model.npz saved

The results only got worse...

So the training parameters were ruled out. It was already 1 a.m. and I had work the next day, so sleep it was.

Day three, still stuck~~~

Day four~~~

Day five. A vendor came to the office to set up a machine, and the engineer happened to have an image-processing background. I brought the problem up: why, after training for so many epochs, is my accuracy still no better than a coin flip? He said: could it be overfitting? I said: impossible, I'm using a ready-made dataset and a ready-made model with pretrained weights, that shouldn't happen!

Still, I figured I should check the data-processing code one more time.

# generate the TFRecord data file
def create_record(filelist):
    random.shuffle(filelist)
    i = 0
    writer = tf.python_io.TFRecordWriter(recordpath)
    for file in filelist:
        name = file.split(sep='.')
        label_val = 0
        if name[0] == 'cat':
            label_val = 0
        else:
            label_val = 1
        img_path = file_dir + file
        img = Image.open(img_path)
        img = img.resize((224, 224))  # must match the [224, 224, 3] reshape in read_and_decode
        img_raw = img.tobytes()  # convert the image to raw bytes
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label_val])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))  # the Example wraps the label together with the image bytes
        writer.write(example.SerializeToString())
        i = i + 1
        print(name[1])
        print(label_val)
        print(i)
    writer.close()

# read the records back through a queue
def read_and_decode(filename):
    # build a queue from the filename
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns the filename and the serialized example
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [224, 224, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label

img = tf.cast(img, tf.float32) * (1. / 255) - 0.5  ...could this step be redundant? I commented it out and trained the model again:

 Epoch 85 of 200 took 1.234071s
train loss: 14.689816
train acc: 0.900000
[TL] [*] model3.npz saved
Epoch 90 of 200 took 1.241071s
train loss: 17.104382
train acc: 0.800000
[TL] [*] model3.npz saved
Epoch 95 of 200 took 1.236071s
train loss: 11.190630
train acc: 0.850000
[TL] [*] model3.npz saved
Epoch 100 of 200 took 1.238071s
train loss: 0.000000
train acc: 1.000000
[TL] [*] model3.npz saved
Epoch 105 of 200 took 1.236071s
train loss: 7.622324
train acc: 0.900000
[TL] [*] model3.npz saved
Epoch 110 of 200 took 1.234071s
train loss: 2.164670
train acc: 0.950000
[TL] [*] model3.npz saved
Epoch 115 of 200 took 1.237071s
train loss: 0.000000
train acc: 1.000000
[TL] [*] model3.npz saved

Accuracy 1.0. Stop, stop... no need to finish the run. Perfect!

So that was it: the network has to see the real pixel values, because its preprocess scope already subtracts the ImageNet mean from 0-255 inputs....... I'm exhausted...... and I honestly can't remember where I copied that scaling line from.
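For the record, a hedged sketch of how the tail of read_and_decode ended up for me (my own rewrite of the lines above; the key point is handing the graph raw 0-255 values and letting the VGG preprocess layer do the mean subtraction):

    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [224, 224, 3])
    # keep the raw 0-255 pixel values; the model's preprocess scope subtracts the ImageNet mean itself
    img = tf.cast(img, tf.float32)
    label = tf.cast(features['label'], tf.int32)
    return img, label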

And that wraps up transfer learning with the VGG16 model. The code is on GitHub.
