Date/time | |
Participants (co-researchers) | |
import tensorflow as tf
import numpy as np
import requests
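## NOTE: this script uses the TensorFlow 1.x graph-mode API (tf.placeholder,
## tf.train queue runners) and will not run unmodified on TensorFlow 2.x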
## Constants
IMG_SIZE = 1280 ## height and width of the training images
IMG_LENGTH = IMG_SIZE * IMG_SIZE * 3 ## flattened length of one training image (RGB)
LABEL_CNT = 3 ## number of label classes
IMG_DOMAIN = 'ftp.yz.yamagata-u.ac.jp' ## domain of the URLs the images are fetched from
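## NOTE: IMG_LENGTH = 1280 * 1280 * 3 = 4,915,200, so the weight matrix W
## below holds 4,915,200 * LABEL_CNT ≈ 14.7M float32 values (roughly 59 MB)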
## Initialize the variables needed for training
x = tf.placeholder(tf.float32, shape=[None, IMG_LENGTH])
W = tf.Variable(tf.zeros([IMG_LENGTH, LABEL_CNT]))
b = tf.Variable(tf.zeros([LABEL_CNT]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, shape=[None, LABEL_CNT])
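## The model is plain softmax regression: y = softmax(xW + b) maps each
## flattened image directly to LABEL_CNT class probabilities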
#cross_entropy = tf.reduce_sum(y_ * tf.log(y))
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
cross_entropy = tf.reduce_sum(tf.square(y-y_))
train_step = tf.train.GradientDescentOptimizer(1e-7).minimize(cross_entropy)
#train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
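## NOTE: the active loss is a sum of squared errors; the commented-out lines
## are cross-entropy variants (the first is missing a minus sign as written)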
## Use the CSV file as a work queue
queue = tf.train.string_input_producer(['image20.csv'])
reader = tf.TextLineReader()
key, val = reader.read(queue)
url, label = tf.decode_csv(val, [[''], [0]])
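## record_defaults [[''], [0]] tells decode_csv that each CSV row has the form
## "url,label": a string first column and an integer second column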
myconfig = tf.ConfigProto(intra_op_parallelism_threads=16)
saver = tf.train.Saver()
sess = tf.Session(config=myconfig)
sess.run(tf.global_variables_initializer())
## Prepare batching
batch_url, batch_label = tf.train.batch([url, label], batch_size=20)
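## batch_size=20 dequeues 20 (url, label) pairs per sess.run call; judging by
## the file name, image20.csv presumably holds 20 rows, i.e. one full batch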
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
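## start_queue_runners launches the background threads that feed the
## string_input_producer queue; without them, sess.run([batch_url, batch_label])
## below would block forever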
image2 = []
label2 = []
urls, labels = sess.run([batch_url, batch_label])
for url in urls:
    #con = http.client.HTTPConnection(IMG_DOMAIN)
    #response = con.request('GET', "/pub/camera/AXIS_000/201708021519.jpg")
    #r1 = response.getresponse()
    #image = r1.read()
    ## Fetch the image over HTTP; batch_url yields bytes, so decode to str first
    r = requests.get(url.decode('utf-8'))
    image = r.content
    ## Convert the image into a form TensorFlow can process
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, IMG_SIZE, IMG_SIZE)
    image = tf.reshape(image, [-1])
    ## NOTE: building decode/resize ops here adds new nodes to the graph on
    ## every iteration; it works, but the graph grows with each image
    image_val = sess.run(image).astype(np.float32) / 255.0
    image2.append(image_val)
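## Expand each integer label into a one-hot vector, e.g. label 2 -> [0., 0., 1.]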
for label in labels:
    tmp = np.zeros(LABEL_CNT)
    tmp[label] = 1
    label2.append(tmp)
image2 = np.asarray(image2)
label2 = np.asarray(label2)
image2_len = len(image2)
print(image2_len)
## Build the evaluation ops once, outside the training loop
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
for i in range(100):
    _, loss = sess.run([train_step, cross_entropy], feed_dict={x: image2, y_: label2})
    acc = sess.run(accuracy, feed_dict={x: image2, y_: label2})
    print('Step %d, Loss %f, Accuracy %f' % (i, loss, acc))
saver.save(sess, "ckpts/model1.ckpt")
coord.request_stop() ## stop the queue-runner threads before closing the session
coord.join(threads)
sess.close()
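For later inference, the saved checkpoint can be restored into an identical graph. A minimal sketch, assuming the same definitions of x, W, b and y as above; some_image is a hypothetical stand-in for a preprocessed 1280x1280x3 image vector:

import tensorflow as tf
import numpy as np

IMG_SIZE = 1280
IMG_LENGTH = IMG_SIZE * IMG_SIZE * 3
LABEL_CNT = 3

## Rebuild the training graph so the checkpoint variables match
x = tf.placeholder(tf.float32, shape=[None, IMG_LENGTH])
W = tf.Variable(tf.zeros([IMG_LENGTH, LABEL_CNT]))
b = tf.Variable(tf.zeros([LABEL_CNT]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'ckpts/model1.ckpt')
    some_image = np.zeros(IMG_LENGTH, dtype=np.float32) ## hypothetical preprocessed image
    pred = sess.run(tf.argmax(y, 1), feed_dict={x: [some_image]})
    print('Predicted label: %d' % pred[0])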