python training
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-  # encoding declaration (original file had Korean comments)
import time

import numpy as np
import tensorflow as tf

t1 = time.time()  # start timing

# Load the prepared dataset
loadpath = "/home/vml/PycharmProjects/sjchoi/data/makingnpz113.npz"
l = np.load(loadpath)

batch_size = 3
test_size = 5

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w, w2, w3, w4, w_o, b, b2, b3, b4, b_o, p_keep_conv, p_keep_hidden):
    l1a = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, w,  # l1a shape=(?, 112, 112, 32)
                                                 strides=[1, 1, 1, 1], padding='SAME'), b))
    print(l1a)
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],        # l1 shape=(?, 56, 56, 32)
                        strides=[1, 2, 2, 1], padding='SAME')
    print(l1)
    l1 = tf.nn.dropout(l1, p_keep_conv)

    l2a = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l1, w2,  # l2a shape=(?, 56, 56, 64)
                                                 strides=[1, 1, 1, 1], padding='SAME'), b2))
    print(l2a)
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],           # l2 shape=(?, 28, 28, 64)
                        strides=[1, 2, 2, 1], padding='SAME')
    print(l2)
    l2 = tf.nn.dropout(l2, p_keep_conv)

    l3a = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l2, w3,  # l3a shape=(?, 28, 28, 128)
                                                 strides=[1, 1, 1, 1], padding='SAME'), b3))
    print(l3a)
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],           # l3 shape=(?, 14, 14, 128)
                        strides=[1, 2, 2, 1], padding='SAME')
    print(l3)
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])  # reshape to (?, 25088) = 128*14*14
    print(w4.get_shape().as_list()[0])
    l3 = tf.nn.dropout(l3, p_keep_conv)

    l4 = tf.nn.relu(tf.nn.bias_add(tf.matmul(l3, w4), b4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)
    print(l4)

    pyx = tf.nn.bias_add(tf.matmul(l4, w_o), b_o)
    print(pyx)
    return pyx

trX, trY, teX, teY = l['trainimg'], l['trainlabel'], l['testimg'], l['testlabel']
trX = trX.reshape(-1, 112, 112, 3)  # 112x112x3 input images
teX = teX.reshape(-1, 112, 112, 3)  # 112x112x3 input images

X = tf.placeholder("float", [None, 112, 112, 3])
Y = tf.placeholder("float", [None, 20])

w = init_weights([3, 3, 3, 32])          # 3x3x3 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])        # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])       # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 14 * 14, 625])  # FC: 128*14*14 inputs, 625 outputs
w_o = init_weights([625, 20])            # FC: 625 inputs, 20 outputs (labels)
b = init_weights([32])
b2 = init_weights([64])
b3 = init_weights([128])
b4 = init_weights([625])
b_o = init_weights([20])

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, b, b2, b3, b4, b_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()
    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX), batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # get a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))

t2 = time.time()   # stop timing
print(t2 - t1)     # elapsed training time in seconds
```
The accuracy only ever comes out as 0, 0.2, or 0.4. (With test_size = 5, each evaluation scores just 5 random test images, so the measured accuracy can only be a multiple of 0.2.)
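For a finer-grained number, a minimal sketch of evaluating the entire test set instead of 5 random images, reusing the same `sess`, `predict_op`, and placeholders from the block above (and assuming the test set fits in memory):

```python
# Inside the epoch loop, replace the 5-sample evaluation:
# score every test image, so accuracy resolves in steps of 1/len(teX).
test_preds = sess.run(predict_op, feed_dict={X: teX,
                                             p_keep_conv: 1.0,
                                             p_keep_hidden: 1.0})
print(i, np.mean(np.argmax(teY, axis=1) == test_preds))
```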
2. Using Dr. Sungjoon Choi's example (Seoul National University)
```python
# -*- coding: utf-8 -*-  # encoding declaration (original file had Korean comments)
"""
Convolutional Neural Network (CNN) with Custom Data
@Sungjoon Choi (sungjoon.choi@cpslab.snu.ac.kr)
based on "https://github.com/aymericdamien/TensorFlow-Examples/"
"""
import os

import numpy as np
import tensorflow as tf

# Load the prepared dataset
cwd = os.getcwd()
loadpath = cwd + "/data/makingnpz113.npz"
l = np.load(loadpath)

# See what's in here
print(l.files)

# Parse data
trainimg = l['trainimg']      # load only the training images
trainlabel = l['trainlabel']  # load only the training labels
testimg = l['testimg']
testlabel = l['testlabel']
ntrain = trainimg.shape[0]    # rows of trainimg: ntrain = 9056 (11321 * 0.8)
nclass = trainlabel.shape[1]  # columns of trainlabel: nclass = 20 (20 classes)
dim = trainimg.shape[1]       # columns of trainimg: 112 * 112 * 3
ntest = testimg.shape[0]      # rows of testimg: ntest = 11321 - 9056
print("%d train images loaded" % ntrain)
print("%d test images loaded" % ntest)
print("%d dimensional input" % dim)
print("%d classes" % nclass)

# Define convolutional neural network architecture
# Parameters
learning_rate = 0.001
training_epochs = 100
batch_size = 100
display_step = 10

# Network
n_input = dim      # n_input = 112*112*3
n_output = nclass  # n_output = 20
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 3, 64], stddev=0.1)),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),
    'wd1': tf.Variable(tf.random_normal([28 * 28 * 128, 1024], stddev=0.1)),
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
    'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}

def conv_basic(_input, _w, _b, _keepratio):
    # Input
    _input_r = tf.reshape(_input, shape=[-1, 112, 112, 3])
    # Conv1
    _conv1 = tf.nn.relu(tf.nn.bias_add(
        tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME'),  # _conv1 shape=(?, 112, 112, 64)
        _b['bc1']))
    print(_conv1)
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # _pool1 shape=(?, 56, 56, 64)
    mean, var = tf.nn.moments(_pool1, [0, 1, 2])  # computed but not used further
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    print(_pool1)
    # Conv2
    _conv2 = tf.nn.relu(tf.nn.bias_add(
        tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME'),  # _conv2 shape=(?, 56, 56, 128)
        _b['bc2']))
    print(_conv2)
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # _pool2 shape=(?, 28, 28, 128)
    mean, var = tf.nn.moments(_pool2, [0, 1, 2])  # computed but not used further
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    print(_pool2)
    # Vectorize
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])  # _dense1 shape=(?, 28*28*128)
    print(_dense1)
    # Fc1
    _fc1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    print(_fc1)
    # Fc2
    _out = tf.nn.bias_add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    print(_out)
    # Return everything
    out = {
        'input_r': _input_r,
        'conv1': _conv1,
        'pool1': _pool1,
        'pool1_dr1': _pool_dr1,
        'conv2': _conv2,
        'pool2': _pool2,
        'pool_dr2': _pool_dr2,
        'dense1': _dense1,
        'fc1': _fc1,
        'fc_dr1': _fc_dr1,
        'out': _out
    }
    return out

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)

# Functions!
_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))  # count correct predictions
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))       # accuracy
init = tf.global_variables_initializer()
print("Network Ready to Go!")

# Launch the graph
sess = tf.Session()
sess.run(init)

# Training cycle
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(ntrain / batch_size) + 1
    # Loop over all batches
    for i in range(num_batch):
        randidx = np.random.randint(ntrain, size=batch_size)
        batch_xs = trainimg[randidx, :]
        batch_ys = trainlabel[randidx, :]
        # Fit training using batch data
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / num_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
        print(" Training accuracy: %.3f" % (train_acc))
        test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel, keepratio: 1.})
        print(" Test accuracy: %.3f" % (test_acc))
print("Optimization Finished!")
```
Output from the last logged epoch:

```
Epoch: 090/100 cost: 690440.812500000
 Training accuracy: 0.110
 Test accuracy: 0.174
Optimization Finished!
```
The key thing to watch during training is that the dimensions of your input data line up with the weight shapes: the flattened size of the last pooling layer must equal the first dimension of the first fully connected weight matrix.
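As a quick sanity check, here is a minimal sketch of computing that flattened size from the input resolution and the number of 2x2 poolings (`fc_input_dim` is a hypothetical helper of my own, not part of the code above):

```python
def fc_input_dim(height, width, out_channels, num_pools):
    # Each 2x2 max-pool with stride 2 and 'SAME' padding halves the
    # spatial dimensions (rounding up), so after num_pools poolings the
    # flattened vector has height * width * out_channels elements.
    for _ in range(num_pools):
        height = (height + 1) // 2
        width = (width + 1) // 2
    return height * width * out_channels

# 112x112 input, two poolings, 128 channels -> 28 * 28 * 128 = 100352,
# which must match weights['wd1'] = [28 * 28 * 128, 1024] above.
assert fc_input_dim(112, 112, 128, 2) == 28 * 28 * 128
```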