Commit 39ca20bc authored by Mario Pasquato's avatar Mario Pasquato
Browse files

initial commit

parents
File added
from keras.models import load_model
import matplotlib.image as mpimg
import numpy as np
import os
# Load the trained CNN saved by the companion training script (model.save("model.hd5")).
model = load_model('model.hd5')
def imgprob(filename):
    """Return the model's class-probability vector for one image file.

    Keeps only the red channel (the channels are presumably identical --
    TODO confirm) and reshapes to the (1, H, W, 1) batch format the
    network expects.
    """
    img = mpimg.imread(filename)
    img = img[:, :, 0]
    # Generalized: derive the reshape from the image itself instead of
    # hard-coding [1, 800, 800, 1]; the model still fixes the usable size.
    h, w = img.shape
    img = img.reshape(1, h, w, 1)
    img = img.astype('float32')
    # NOTE(review): the /255 scaling is disabled here and in the training
    # script -- keep the two consistent if it is ever re-enabled.
    #img = img/255 #also this, to go from integers to [0,1]
    p = model.predict(img, verbose=1)
    return p
def predictimg(filename):
    """Return the predicted class index for an image (0 = regular, 1 = jellyfish)."""
    probabilities = imgprob(filename)
    return np.argmax(probabilities)
def list_test_files(folder):
    """Recursively collect the paths of all files found under *folder*."""
    collected = []
    for root, _subdirs, names in os.walk(folder):
        collected.extend(os.path.join(root, name) for name in names)
    return collected
# Class-index legend: index 0 = regular galaxy, index 1 = jellyfish.
legenda = ["Regular", "Jellyfish"]
def predict_on_folder(folder):
    """Classify every image under *folder* and print the per-class counts.

    `cerchiati` counts images predicted as class 0 (regular) and
    `noncerchiati` those predicted as class 1 (jellyfish).
    """
    cerchiati = 0
    noncerchiati = 0
    for test_file in list_test_files(folder):
        predicted = predictimg(test_file)
        # predicted is 0 or 1, so summing it counts class-1 predictions directly.
        noncerchiati += predicted
        cerchiati += (1 - predicted)
        #print(test_file + " " + legenda[predicted])
    # Parenthesized single-argument print behaves identically under
    # Python 2 and Python 3, unlike the bare print statement it replaces.
    print("in folder " + folder)
    print("Predicted regular " + str(cerchiati))
    print("Predicted jellyfish " + str(noncerchiati))
# Run the classifier over each evaluation folder and print the counts.
# NOTE(review): 'three' lacks the 'test/' prefix the other two calls use --
# confirm this directory really sits next to the script, not under test/.
predict_on_folder('test/zero')
predict_on_folder('test/five')
predict_on_folder('three')
#!/usr/bin/python
#based on https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py
import keras
import os
import numpy as np
#import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.datasets import cifar10
import matplotlib.image as mpimg
# Input image dimensions in pixels; used to reshape the loaded arrays below.
width = 800
height = 800
#trying to get diagnostics for the out of memory error
#run_options = tf.RunOptions(report_tensor_allocations_upon_oom = True)
#sess.run(op, feed_dict=fdict, options=run_options)
#img = mpimg.imread('circled/00003.jpg')
#img = img[:,:,0:3]
def list_train_files(folder='train'):
    """Recursively list every file path under *folder*.

    The folder is now a parameter whose default is the previously
    hard-coded 'train', so existing zero-argument callers are unchanged.
    """
    train_files = []
    for dirname, _dirnames, filenames in os.walk(folder):
        for filename in filenames:
            train_files.append(os.path.join(dirname, filename))
    return train_files
def list_test_files(folder='test'):
    """Recursively list every file path under *folder*.

    The folder is now a parameter whose default is the previously
    hard-coded 'test', so existing zero-argument callers are unchanged.
    """
    test_files = []
    for dirname, _dirnames, filenames in os.walk(folder):
        for filename in filenames:
            test_files.append(os.path.join(dirname, filename))
    return test_files
def load_image(x):
    """Read image file *x* and return its red channel as a 2-D array."""
    # Only the red plane is kept; the channels are hopefully all equal.
    return mpimg.imread(x)[:, :, 0]
def load_label(x):
    """Map a file path to its class label: 0 if 'zero' occurs in the path, else 1."""
    return 0 if "zero" in x else 1
def hst_load_data():
    """Load train/test images and labels from the 'train' and 'test' trees.

    Returns ((x_train, y_train), (x_test, y_test)) with images shaped
    (n, width, height, 1) and integer labels (0 = regular, 1 = jellyfish,
    as assigned by load_label).
    """
    train_files = list_train_files()
    test_files = list_test_files()
    # Parenthesized single-argument print is valid under both Python 2 and 3.
    print("Loading training images...")
    x_train = np.array([load_image(x) for x in train_files])
    y_train = np.array([load_label(x) for x in train_files])
    print("Loading test images...")
    x_test = np.array([load_image(x) for x in test_files])
    y_test = np.array([load_label(x) for x in test_files])
    n_train = len(train_files)
    n_test = len(test_files)
    print("Loaded " + str(n_train) + " training images, " + str(n_test) + " test images.")
    train_shape = (n_train, width, height, 1) #was (n_train, 3, width, height) and did not work
    test_shape = (n_test, width, height, 1) #check that the images are still alright
    x_train = x_train.reshape(train_shape)
    x_test = x_test.reshape(test_shape)
    y_train = y_train.reshape(n_train, )
    y_test = y_test.reshape(n_test, )
    print("Total number of images " + str(n_train + n_test))
    # Label 0 marks regular galaxies, so their count is len - sum.
    print("Number of regular galaxies in train " + str(len(y_train) - y_train.sum()))
    # BUG FIX: jellyfish carry label 1, so their count is the sum itself;
    # the original printed len - sum here, which is the regular count.
    print("Number of jellyfish galaxies in test " + str(y_test.sum()))
    return (x_train, y_train), (x_test, y_test)
#read the data
(x_train, y_train), (x_test, y_test) = hst_load_data()
# Sanity-check the loaded array shapes and overall pixel mean.
# Parenthesized single-argument print is valid under both Python 2 and 3.
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_train.mean())
#set learning parameters
batch_size = 10
num_classes = 2
epochs = 75
#convert to appropriate format (1-hot encoding)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(x_train.shape)
print(y_train.shape)
#set up the net
model = Sequential() #empty sequential model
eta_dropout = 0.35 #dropout rate shared by every Dropout layer below
#1st layer is a convolutional layer with 32 filters on 3x3 fields
#('same' padding preserves the spatial size of the input)
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
model.add(MaxPooling2D(pool_size=(2, 2))) #followed by max pooling
#2nd layer also a convolutional layer, with max pooling, same as before
#(no padding here, so each conv trims the borders slightly)
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
model.add(MaxPooling2D(pool_size=(2, 2)))
#3rd layer
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
model.add(MaxPooling2D(pool_size=(2, 2)))
#4th layer
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
model.add(MaxPooling2D(pool_size=(2, 2)))
#5th layer is fully connected (Flatten turns the conv maps into a vector)
model.add(Flatten())
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
#6th layer is fully connected
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
#7th layer is fully connected
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(eta_dropout))
#last layer outputs the probability of predicted classes
#(has num_classes neurons, softmax activation)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
#see the model
model.summary()
# initiate RMSprop optimizer, see http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
# NOTE(review): lowercase `rmsprop` with `lr`/`decay` is the legacy Keras API;
# modern Keras expects keras.optimizers.RMSprop(learning_rate=...). Confirm
# against the pinned Keras version before changing.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
#set up training
# NOTE(review): with a 2-neuron softmax and one-hot labels,
# 'categorical_crossentropy' would be the conventional loss; verify that
# 'binary_crossentropy' is intended here.
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
#scale the pixel values to [0,1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# NOTE(review): the /255 scaling below is disabled, so pixels keep their raw
# range; the prediction script disables the same scaling, so the two match.
#x_train /= 255
#x_test /= 255
#train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True)
model.save("model.hd5")
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment