"""Small collection of Keras / TensorFlow-1.x training and prediction experiments.

Each function is self-contained (imports its own heavy dependencies) and is
driven from the __main__ guard at the bottom of the file.  All functions read
and write files in the current working directory.
"""


def mnist_by_keras():
    """Train a small CNN on MNIST with Keras and save checkpoints/logs to disk."""
    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D
    from keras.callbacks import ModelCheckpoint, CSVLogger
    from keras import backend as K

    print('mnist_by_keras:start!')
    batch_size = 128
    num_classes = 10
    epochs = 1
    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # The TensorFlow and Theano backends order the channel axis differently,
    # so branch on the backend's reported image data format.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    # Scale pixel values into [0, 1] as float32.
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # One-hot encode the integer class labels.
    y_train = y_train.astype('int32')
    y_test = y_test.astype('int32')
    y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.np_utils.to_categorical(y_test, num_classes)

    checkpointer = ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.hdf5',
                                   verbose=1, save_best_only=True)
    csv_logger = CSVLogger('model.log')

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[csv_logger, checkpointer])
    print('mnist_by_keras:finish!')


def mnist_by_tf():
    """Train a small CNN on MNIST with raw TensorFlow 1.x ops and report accuracy."""
    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    sess = tf.InteractiveSession()
    # Fix the random seed for reproducibility (any constant works).
    tf.set_random_seed(12345)

    # Input data: MNIST loaded with one-hot labels.
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # 0: input images (flattened; reshaped below).
    x = tf.placeholder(tf.float32, name='x')
    # 1: reshape to NHWC; -1 lets TF infer the batch dimension at run time.
    x_1 = tf.reshape(x, [-1, 28, 28, 1])
    # 2: convolution with a random 4x4 kernel, 1 input channel, 10 output channels.
    k_0 = tf.Variable(tf.truncated_normal([4, 4, 1, 10], mean=0.0, stddev=0.1))
    x_2 = tf.nn.conv2d(x_1, k_0, strides=[1, 3, 3, 1], padding='VALID')
    # 3: activation.
    x_3 = tf.nn.relu(x_2)
    # 4: max pooling.
    x_4 = tf.nn.max_pool(x_3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
    # 5: flatten.  The conv stride (3) and pooling shrink 28x28 down to 4x4
    # spatial positions x 10 channels = 160 features per sample.
    x_5 = tf.reshape(x_4, [-1, 160])
    # 6: fully connected layer, 160 features -> 40 hidden units.
    w_1 = tf.Variable(tf.zeros([160, 40]))
    b_1 = tf.Variable([0.1] * 40)  # length-40 bias vector
    x_6 = tf.matmul(x_5, w_1) + b_1
    # 7: activation.
    x_7 = tf.nn.relu(x_6)
    # 8: fully connected layer, 40 hidden units -> 10 classes.
    w_2 = tf.Variable(tf.zeros([40, 10]))
    b_2 = tf.Variable([0.1] * 10)
    x_8 = tf.matmul(x_7, w_2) + b_2
    # 9: softmax to class probabilities.
    y = tf.nn.softmax(x_8)

    # 10: loss minimization.
    labels = tf.placeholder(tf.float32, name='labels')
    # Cross-entropy loss (sum over classes and batch) minimized with Adam.
    loss = -tf.reduce_sum(labels * tf.log(y))
    optimizer = tf.train.AdamOptimizer().minimize(loss)

    # 11: accuracy check — argmax over the class axis gives the predicted /
    # true class per sample; the mean of the match vector is the accuracy.
    prediction_match = tf.equal(tf.argmax(y, axis=1), tf.argmax(labels, axis=1))
    accuracy = tf.reduce_mean(tf.cast(prediction_match, tf.float32), name='accuracy')

    # Hyperparameters.
    BATCH_SIZE = 32
    NUM_TRAIN = 10_000
    OUTPUT_BY = 500  # report training accuracy every this many steps

    # Training loop.
    sess.run(tf.global_variables_initializer())
    for i in range(NUM_TRAIN):
        batch = mnist.train.next_batch(BATCH_SIZE)
        # batch[0]: input vectors, batch[1]: one-hot labels.
        inout = {x: batch[0], labels: batch[1]}
        if i % OUTPUT_BY == 0:
            train_accuracy = accuracy.eval(feed_dict=inout)
            print('step {:d}, accuracy {:.2f}'.format(i, train_accuracy))
        optimizer.run(feed_dict=inout)

    # Final accuracy on the held-out test set.
    test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images,
                                             labels: mnist.test.labels})
    print('test accuracy {:.2f}'.format(test_accuracy))


def x2jou_by_keras():
    """Train a small dense net on the y=x^2 dataset ('y_xjijou.csv').

    CSV layout (after the skipped header): two feature columns and one
    integer class-label column; rows 0-99 train, rows 100-199 test.
    """
    import numpy as np
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Flatten
    from keras.callbacks import ModelCheckpoint, CSVLogger
    from keras import backend as K

    print('x2jou_by_keras:start!')
    batch_size = 10
    num_classes = 2
    epochs = 1000

    nyuryoku_csv = np.loadtxt('y_xjijou.csv', delimiter=",", skiprows=1)
    x_train = nyuryoku_csv[0:100, 0:2]
    x_test = nyuryoku_csv[100:200, 0:2]
    # np.int was removed from NumPy (1.24+); builtin int is the same alias.
    y_train_mae = nyuryoku_csv[0:100, 2:3].astype(int)
    y_test_mae = nyuryoku_csv[100:200, 2:3].astype(int)

    # Channel axis position depends on the Keras backend.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, 2)
        x_test = x_test.reshape(x_test.shape[0], 1, 2)
        input_shape = (1, 2)
    else:
        x_train = x_train.reshape(x_train.shape[0], 2, 1)
        x_test = x_test.reshape(x_test.shape[0], 2, 1)
        input_shape = (2, 1)

    # One-hot encode the labels.
    y_train = keras.utils.np_utils.to_categorical(y_train_mae, num_classes)
    y_test = keras.utils.np_utils.to_categorical(y_test_mae, num_classes)

    # Checkpoint every 100 epochs, keeping only improvements.
    checkpointer = ModelCheckpoint(filepath='jijou_model.{epoch:02d}-{val_loss:.2f}.hdf5',
                                   verbose=1, save_best_only=True, period=100)
    csv_logger = CSVLogger('model.log')

    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=input_shape))
    model.add(Dense(10, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax'))
    print('model_setting:end!')
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    print('model_compile:end!')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[csv_logger, checkpointer])
    print('x2jou_by_keras:finish!')


def xjijou_predict():
    """Load a trained y=x^2 model, predict on the test rows, and dump a CSV.

    Writes 'xjijou_kakikomi.csv': each test row from the source CSV with the
    two predicted class probabilities appended.
    """
    import csv
    import numpy as np
    import keras
    from keras.models import load_model
    from keras import backend as K

    model = load_model(filepath='./jijou_model.400-0.06.hdf5')
    print('model_load:end!')

    nyuryoku_csv = np.loadtxt('y_xjijou.csv', delimiter=",", skiprows=1)
    x_test_mae = nyuryoku_csv[100:200, 0:2]
    # np.int was removed from NumPy (1.24+); builtin int is the same alias.
    y_test_mae = nyuryoku_csv[100:200, 2:3].astype(int)
    num_classes = 2

    # Channel axis position depends on the Keras backend.
    if K.image_data_format() == 'channels_first':
        x_test = x_test_mae.reshape(x_test_mae.shape[0], 1, 2)
        input_shape = (1, 2)
    else:
        x_test = x_test_mae.reshape(x_test_mae.shape[0], 2, 1)
        input_shape = (2, 1)
    y_test = keras.utils.np_utils.to_categorical(y_test_mae, num_classes)

    with open('xjijou_kakikomi.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        y_test_pred = model.predict(x_test)
        for kurikai in range(100):
            # Original row plus both predicted class probabilities.
            kakikomilist = nyuryoku_csv[100 + kurikai].tolist()
            kakikomilist.append(y_test_pred[kurikai][0])
            kakikomilist.append(y_test_pred[kurikai][1])
            writer.writerow(kakikomilist)
    print('predict&csvkakikomi:end!')


def x3ji_by_keras():
    """Train a small dense net on the cubic-function dataset ('3jikansu.csv').

    CSV layout (after the skipped header): two feature columns and one
    integer class-label column; rows 0-499 train, rows 500-999 test.
    """
    import numpy as np
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Flatten
    from keras.callbacks import ModelCheckpoint, CSVLogger
    from keras import backend as K

    print('x3ji_by_keras:start!')
    batch_size = 10
    num_classes = 2
    epochs = 1000

    nyuryoku_csv = np.loadtxt('3jikansu.csv', delimiter=",", skiprows=1)
    x_train = nyuryoku_csv[0:500, 0:2]
    x_test = nyuryoku_csv[500:1000, 0:2]
    # np.int was removed from NumPy (1.24+); builtin int is the same alias.
    y_train_mae = nyuryoku_csv[0:500, 2:3].astype(int)
    y_test_mae = nyuryoku_csv[500:1000, 2:3].astype(int)

    # Channel axis position depends on the Keras backend.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, 2)
        x_test = x_test.reshape(x_test.shape[0], 1, 2)
        input_shape = (1, 2)
    else:
        x_train = x_train.reshape(x_train.shape[0], 2, 1)
        x_test = x_test.reshape(x_test.shape[0], 2, 1)
        input_shape = (2, 1)

    # One-hot encode the labels.
    y_train = keras.utils.np_utils.to_categorical(y_train_mae, num_classes)
    y_test = keras.utils.np_utils.to_categorical(y_test_mae, num_classes)

    # Checkpoint every 100 epochs, keeping only improvements.
    checkpointer = ModelCheckpoint(filepath='3jikansu_model.{epoch:02d}-{val_loss:.2f}.hdf5',
                                   verbose=1, save_best_only=True, period=100)
    csv_logger = CSVLogger('model.log')

    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=input_shape))
    model.add(Dense(10, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax'))
    print('model_setting:end!')
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    print('model_compile:end!')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[csv_logger, checkpointer])
    print('x3ji_by_keras:finish!')


def x3ji_predict():
    """Load a trained cubic-function model, predict on the test rows, dump a CSV.

    Writes '3jikansu_kakikomi.csv': each test row from the source CSV with the
    two predicted class probabilities appended.
    """
    import csv
    import numpy as np
    import keras
    from keras.models import load_model
    from keras import backend as K

    model = load_model(filepath='./3jikansu_model.1000-0.04.hdf5')
    print('model_load:end!')

    nyuryoku_csv = np.loadtxt('3jikansu.csv', delimiter=",", skiprows=1)
    x_test_mae = nyuryoku_csv[500:1000, 0:2]
    # np.int was removed from NumPy (1.24+); builtin int is the same alias.
    y_test_mae = nyuryoku_csv[500:1000, 2:3].astype(int)
    num_classes = 2

    # Channel axis position depends on the Keras backend.
    if K.image_data_format() == 'channels_first':
        x_test = x_test_mae.reshape(x_test_mae.shape[0], 1, 2)
        input_shape = (1, 2)
    else:
        x_test = x_test_mae.reshape(x_test_mae.shape[0], 2, 1)
        input_shape = (2, 1)
    y_test = keras.utils.np_utils.to_categorical(y_test_mae, num_classes)

    with open('3jikansu_kakikomi.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        y_test_pred = model.predict(x_test)
        for kurikai in range(500):
            # Original row plus both predicted class probabilities.
            kakikomilist = nyuryoku_csv[500 + kurikai].tolist()
            kakikomilist.append(y_test_pred[kurikai][0])
            kakikomilist.append(y_test_pred[kurikai][1])
            writer.writerow(kakikomilist)
    print('predict&csvkakikomi:end!')


if __name__ == '__main__':
    # mnist_by_keras()
    # x2jou_by_keras()
    # xjijou_predict()
    # x3ji_by_keras()
    x3ji_predict()