Don't ask me why I wrote this article. I was just bored. If there's one thing students have plenty of, it's time, and since I can't play games my teammates mercilessly abandoned me (  ̄ー ̄)

Then I noticed the captcha service on the academic affairs management system. It had always bugged me a little, so fine, let's crack it. Below I'll walk you through building a simple captcha recognizer.

Captcha recognition demo

First, a quick look at what the recognition looks like in action.
A captcha waiting to be recognized:

Run the code and it returns the recognition result:

python train.py
train or predict: predict
filename: ../model/0.gif
vf20

Now for the main part.

Fetching the captchas

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/2 9:52 AM
# @Author : tudoudou
# @File : get_picture.py
# @Software: PyCharm

import os
from urllib.request import urlretrieve


def save_img(img_url, file_name, file_path='data'):
    try:
        if not os.path.exists(file_path):
            print('Folder', file_path, 'not found, creating it')
            os.makedirs(file_path)

        filename = '{}{}{}{}'.format(file_path, os.sep, file_name, '.gif')

        urlretrieve(img_url, filename=filename)
    except IOError as e:
        print('File operation failed:', e)
    except Exception as e:
        print('error :', e)


if __name__ == '__main__':
    i = 0
    while i < 100:
        save_img('http://jwsys.ctbu.edu.cn/CheckCode.aspx', i)
        # I just grabbed a random Zhengfang academic-affairs login page off the web, so I don't leak my own info ~( TロT)σ
        i += 1
    print('end !')

With the captchas downloaded, the next step is to label them by hand.
Labeled captcha example:
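A note on how the labels end up being stored (this is implied by dataset.py further down rather than spelled out): each labeled file in data_biaoji is simply renamed to its four characters plus the .gif extension, e.g. vf20.gif, which is exactly what the len(i) != 8 check relies on. A minimal sanity check over the labeled folder might look like this sketch (the folder name and character set come from the code below; the script itself is mine):

import os
import string

# the 36 characters the captcha can contain, matching the keys of `dic` in dataset.py
charset = set(string.digits + string.ascii_lowercase)

for name in os.listdir('./data_biaoji'):
    # expect names like 'vf20.gif': 4 label characters + '.gif' == 8 characters
    if len(name) != 8 or not name.endswith('.gif'):
        print('bad filename:', name)
    elif not set(name[:4]) <= charset:
        print('unexpected label character in:', name)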

Splitting the image

Each captcha gets cut into four single-character tiles.

Partial implementation:

from PIL import Image

# coordinates of the four character tiles: (left, upper, right, lower)
boxs = [(5, 1, 17, 21), (17, 1, 29, 21), (29, 1, 41, 21), (41, 1, 53, 21)]
# 'img_root' is the path of the captcha image; convert to grayscale, then to 1-bit black & white
img = Image.open('img_root').convert('L').convert('1')
for x in range(len(boxs)):
    roi = img.crop(boxs[x])
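To check whether those crop boxes actually line up with the characters, one option is to dump the four tiles of a few downloaded captchas to disk and look at them. This is just my own sketch; it assumes the captchas were saved to data/ by get_picture.py and writes the tiles to a data_split/ folder:

import os
from PIL import Image

boxs = [(5, 1, 17, 21), (17, 1, 29, 21), (29, 1, 41, 21), (41, 1, 53, 21)]
os.makedirs('data_split', exist_ok=True)

for name in sorted(os.listdir('data'))[:5]:  # just the first few captchas
    img = Image.open(os.path.join('data', name)).convert('L').convert('1')
    for x, box in enumerate(boxs):
        tile = img.crop(box)
        # e.g. data_split/0_2.png is the third character tile of 0.gif
        tile.save(os.path.join('data_split', '{}_{}.png'.format(os.path.splitext(name)[0], x)))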

Generating the data files (TFRecord)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/2 9:53 AM
# @Author : tudoudou
# @File : dataset.py
# @Software: PyCharm


import os
from TFtools import TFRecord
import numpy as np
from PIL import Image
import tensorflow as tf
import time

# character -> class index
dic = {'9': 2, 'f': 15, 'z': 9, 'o': 16, '7': 4, '5': 33, '8': 11, 'w': 35, '0': 3, 'y': 32, 'k': 29, 'b': 28, 'n': 6,
       'r': 0, 'j': 19, 's': 13, 'i': 5, '3': 26, 'x': 25, 'u': 1, 'a': 14, 't': 12, 'p': 34, '6': 8, 'q': 23, 'h': 17,
       'd': 21, '1': 10, 'v': 22, 'g': 7, '4': 24, '2': 31, 'c': 20, 'l': 30, 'e': 27, 'm': 18}

# class index -> character
dic_ = {0: 'r', 1: 'u', 2: '9', 3: '0', 4: '7', 5: 'i', 6: 'n', 7: 'g', 8: '6', 9: 'z', 10: '1', 11: '8', 12: 't',
        13: 's', 14: 'a', 15: 'f', 16: 'o', 17: 'h', 18: 'm', 19: 'j', 20: 'c', 21: 'd', 22: 'v', 23: 'q', 24: '4',
        25: 'x', 26: '3', 27: 'e', 28: 'b', 29: 'k', 30: 'l', 31: '2', 32: 'y', 33: '5', 34: 'p', 35: 'w'}


def read_tfrecord(tfr=None, type_='train', num=1500):
    if tfr is None:
        tfr = TFRecord({'img': [bytes], 'labels': [int] * 36})
    if type_ == 'train':
        num = 1500
        example = tfr.reader('./tfrecord/*.tfrecord')
    else:
        num = 100
        example = tfr.reader('./tfrecord/*.tfrecords')
    img = tf.decode_raw(example['img'], tf.uint8)
    img = tf.reshape(img, [20, 12])
    lab = example['labels']
    images, labels = [], []

    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(num):
            res1, res2 = sess.run([img, lab])

            # res1 *= 255
            # Image.fromarray(res1).show()
            # print(dic_[np.argmax(res2)])
            images.append([res1])
            labels.append(res2)

        coord.request_stop()
        coord.join(threads)

    return np.array(images), np.array(labels)


def write_tfrecord(tfr):
    writer = tfr.writer('./tfrecord/', pre_file_capacity=500)
    boxs = [(5, 1, 17, 21), (17, 1, 29, 21), (29, 1, 41, 21), (41, 1, 53, 21)]
    lab = np.zeros((1, 36))
    for parent, dirnames, filenames in os.walk('./data_biaoji'):
        for i in filenames:
            # labeled files are named '<4 characters>.gif', e.g. 'vf20.gif'
            if len(i) != 8:
                print(i)
                raise ValueError
            img = Image.open(os.path.join(parent, i)).convert('L').convert('1')

            for x in range(len(boxs)):
                roi = img.crop(boxs[x])
                roi = np.array(roi).reshape((1, 240))
                # one-hot label for the x-th character
                lab = np.zeros((1, 36))
                lab[0][dic[i[x]]] = 1
                lab = lab.astype(np.int)
                writer.add_example({'img': [roi.astype(np.uint8).tostring()], 'labels': lab[0]})
    writer.close()


if __name__ == '__main__':
    tfr = TFRecord({'img': [bytes], 'labels': [int] * 36})
    write_tfrecord(tfr)

Writing the neural network model

Building the model with Keras:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/2 9:53 AM
# @Author : tudoudou
# @File : model.py
# @Software: PyCharm


import tensorflow as tf
import keras
from keras.layers import Conv2D, MaxPooling2D, Input
from keras.layers import BatchNormalization, Activation
from keras.layers import Dense, AveragePooling2D, Flatten
from keras.layers import Dropout
from keras.models import model_from_json, load_model, Model


class Net():

    def __init__(self):
        print('init ')

    def my_model(self):
        # one 20x12 single-character tile, channels_first
        inputs = Input(shape=(1, 20, 12))

        x = Conv2D(
            filters=16, kernel_size=(2, 4), padding='same', name='Conv1', data_format='channels_first')(inputs)
        x = BatchNormalization(axis=1, name='BN_Conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), data_format='channels_first')(x)

        # the second block has to chain from x and use unique layer names,
        # otherwise Keras raises a duplicate-name error
        x = Conv2D(
            filters=4, kernel_size=(2, 2), padding='same', name='Conv2', data_format='channels_first')(x)
        x = BatchNormalization(axis=1, name='BN_Conv2')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), data_format='channels_first')(x)

        x = AveragePooling2D((2, 2), name='avg_pool')(x)
        x = Flatten()(x)
        x = Dropout(0.2)(x)
        # 36 classes: digits 0-9 plus lowercase a-z
        x = Dense(36, activation='softmax', name='softmax36')(x)

        model = Model(inputs, x, name='My_Resnet')

        return model

    def create_model(self):
        model = self.my_model()
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        return model

Start training

from TFtools import TFRecord
from model import Net
from dataset import read_tfrecord, dic_
from keras.models import load_model
from PIL import Image
import numpy as np


def train():
    app = Net()
    app = app.create_model()
    images, labels = read_tfrecord()
    images_val, labels_val = read_tfrecord(type_='val')

    # batch_size=128, epochs=100
    app.fit(images, labels, 128, 100, validation_data=(images_val, labels_val))

    app.save('../model/net.h5')


if __name__ == '__main__':
    do = input('train or predict: ')
    # train()
    if do not in ['train', 'predict']:
        raise ValueError('do must be train or predict !')

    if do == 'train':
        train()
    else:
        boxs = [(5, 1, 17, 21), (17, 1, 29, 21), (29, 1, 41, 21), (41, 1, 53, 21)]
        app = load_model('../model/net.h5')
        while True:
            name = input("filename: ")
            img = Image.open(name).convert('L').convert('1')
            name = ''
            for x in range(len(boxs)):
                aaa = []
                roi = img.crop(boxs[x])
                roi = np.array(roi)
                aaa.append([roi])
                aaa = np.array(aaa)
                # predict one character tile and append the decoded character
                aaa = app.predict(aaa)
                name += dic_[np.argmax(aaa)]
            print(name)

Once training is done, you can load the model and play with it.
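For example, a quick sanity check after training (my own sketch, reusing read_tfrecord and the saved net.h5 from the scripts above) is to score the model on the 100 validation examples:

from keras.models import load_model
from dataset import read_tfrecord

# load the trained model and the validation split
model = load_model('../model/net.h5')
images_val, labels_val = read_tfrecord(type_='val')

# per-character loss and accuracy
loss, acc = model.evaluate(images_val, labels_val, verbose=0)
print('val loss: {:.4f}, val accuracy: {:.2%}'.format(loss, acc))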

Can't follow it? If not, never mind, whatever (´ー∀ー`)

Code repository: https://github.com/ctudoudou/Captcha-Recognition2