diff --git a/README.md b/README.md
index 4373a56..bbd0877 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ The images used to train the network are from [Cornell Grasping Dataset](http://
 
 Having in mind a parallel plate gripper before it closes, a simple and natural way of picturing the grasping position in an image would be a rectangle (see figure 1).
-One way representing it uniquely is as 
+One way of representing it uniquely is as
 
 g = {x, y, \theta, h, w}
 
@@ -32,7 +32,7 @@ The procedure follows these steps:
 - convert the grasping dataset into TFRecords
 - train on the grasping dataset using the pretrained weights
 
-### Prepering Imagenet
+### Preparing ImageNet
 
 Before running the script you will need to download and convert the ImageNet data to native TFRecord format. Check this [link](https://github.com/tensorflow/models/tree/master/research/inception#getting-started) from the Inception model from Google. I found the whole Inception model on GitHub very useful.
@@ -44,13 +44,13 @@ Check also at the end of the file the options that you can use, for example:
 
 ./imagenet_classifier.py --batch_size=128 --model_path=./models/imagenet/m1/m1.ckpt --train_or_validation=train
 
-Running on a GTX 980 and a very^2 good Xeon it needs around two days (I didn't time it). Check in the begining if the model is saving/restoring the weights. 
+Running on a GTX 980 and a very^2 good Xeon, it needs around two days (I didn't time it). Check in the beginning whether the model is saving/restoring the weights.
 
-### Prepering Cornell grasping dataset
+### Preparing Cornell Grasping Dataset
 
-After downloading and decompressing run `build_cgd_dataset.py`. Make sure to adapt to your needs the python file, for example 
+After downloading and decompressing, run `build_cgd_dataset.py` and adapt the Python file to your needs, for example:
 
- - point `dataset` in the right place 
+ - point `dataset` to the right place
  - in `filename[:49]` adapt the number 49 (you can contribute, or I will program it better someday)
 
 ### Train on grasping dataset
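To make the representation concrete, here is a minimal sketch (the helper `rect_to_grasp` is hypothetical, not part of the repository) of how the four vertices of one labelled rectangle from a `pcd*cpos.txt` file map onto g = {x, y, \theta, h, w}. Which edge plays the role of h versus w depends on the vertex ordering convention in the label files, and the training code below regresses tan(\theta) rather than \theta itself, so treat this purely as an illustration:

```python
import numpy as np

def rect_to_grasp(vertices):
    """Map a 4x2 list of rectangle vertices, given in order around the
    rectangle, to the grasp tuple g = (x, y, theta, h, w)."""
    v = np.asarray(vertices, dtype=np.float64)
    x, y = v.mean(axis=0)                 # rectangle center
    edge = v[1] - v[0]                    # one gripper-plate edge
    theta = np.arctan2(edge[1], edge[0])  # gripper orientation
    h = np.linalg.norm(v[1] - v[0])       # gripper-plate length
    w = np.linalg.norm(v[2] - v[1])       # opening between the plates
    return x, y, theta, h, w

# four "x y" vertex lines of one rectangle from a pcd*cpos.txt label file
rect = [(145.0, 100.0), (180.0, 100.0), (180.0, 160.0), (145.0, 160.0)]
print(rect_to_grasp(rect))  # (162.5, 130.0, 0.0, 35.0, 60.0)
```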
diff --git a/build_cgd_dataset.py b/build_cgd_dataset.py
index b72eded..fa185c6 100755
--- a/build_cgd_dataset.py
+++ b/build_cgd_dataset.py
@@ -1,18 +1,193 @@
 #!/usr/local/bin/python
 '''Converts Cornell Grasping Dataset data into TFRecords data format using Example protos.
 
 The raw data set resides in png and txt files located in the following structure:
-    
+
     dataset/03/pcd0302r.png
     dataset/03/pcd0302cpos.txt
 '''
-
-import tensorflow as tf
+import errno
+import traceback
+import itertools
+import six
 import os
 import glob
 import numpy as np
-dataset = '/root/dataset/cornell_grasping_dataset'
+import tensorflow as tf
+import re
+from scipy.ndimage.filters import median_filter
+
+# progress bars https://github.com/tqdm/tqdm
+# import tqdm without enforcing it as a dependency
+try:
+    from tqdm import tqdm
+except ImportError:
+
+    def tqdm(*args, **kwargs):
+        if args:
+            return args[0]
+        return kwargs.get('iterable', None)
+
+from tensorflow.python.platform import flags
+from tensorflow.python.platform import gfile
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.keras.utils import get_file
+from tensorflow.python.keras._impl.keras.utils.data_utils import _hash_file
+import keras
+from keras import backend as K
+
+
+flags.DEFINE_string('data_dir',
+                    os.path.join(os.path.expanduser("~"),
+                                 '.keras', 'datasets', 'cornell_grasping'),
+                    """Path to dataset in TFRecord format
+                    (aka Example protobufs) and feature csv files.""")
+flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
+flags.DEFINE_boolean('grasp_download', True,
+                     """Download the grasp_dataset to data_dir if it is not already present.""")
+
+FLAGS = flags.FLAGS
+
+
+def mkdir_p(path):
+    """Create the specified path on the filesystem like the `mkdir -p` command
+
+    Creates one or more filesystem directory levels as needed,
+    and does not return an error if the directory already exists.
+    """
+    # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
+    try:
+        os.makedirs(path)
+    except OSError as exc:  # Python >2.5
+        if exc.errno == errno.EEXIST and os.path.isdir(path):
+            pass
+        else:
+            raise
+
+
+def is_sequence(arg):
+    """Returns true if arg is a list or another Python Sequence, and false otherwise.
+
+    source: https://stackoverflow.com/a/17148334/99379
+    """
+    return (not hasattr(arg, "strip") and
+            (hasattr(arg, "__getitem__") or
+             hasattr(arg, "__iter__")))
+
+
+class GraspDataset(object):
+    """Cornell Grasping Dataset - about 5GB total size
+    http://pr.cs.cornell.edu/grasping/rect_data/data.php
+
+    Downloads to `~/.keras/datasets/cornell_grasping` by default.
+
+    # Arguments
+
+        data_dir: Path to dataset in TFRecord format
+            (aka Example protobufs) and feature csv files.
+            `~/.keras/datasets/cornell_grasping` by default.
+
+        dataset: 'all' to load all the data.
+
+        download: True to actually download the dataset, also see FLAGS.
+    """
+    def __init__(self, data_dir=None, dataset=None, download=None, verbose=0):
+        if data_dir is None:
+            data_dir = FLAGS.data_dir
+        self.data_dir = data_dir
+        if dataset is None:
+            dataset = FLAGS.grasp_dataset
+        self.dataset = dataset
+        if download is None:
+            download = FLAGS.grasp_download
+        if download:
+            self.download(data_dir, dataset)
+        self.verbose = verbose
+
+    def download(self, data_dir=None, dataset='all'):
+        '''Cornell Grasping Dataset - about 5GB total size
+
+        http://pr.cs.cornell.edu/grasping/rect_data/data.php
+
+        Downloads to `~/.keras/datasets/cornell_grasping` by default.
+        Includes grasp_listing.txt with all files in all datasets;
+        the feature csv files which specify the dataset size,
+        the features (data channels), and the number of grasps;
+        and the tfrecord files which actually contain all the data.
+
+        If `grasp_listing_hash.txt` is present, an additional
+        hashing step will be completed to verify dataset integrity.
+        `grasp_listing_hash.txt` is generated automatically when
+        downloading with `dataset='all'`.
+
+        # Arguments
+
+            dataset: The name of the dataset to download. 'all' (or the
+                empty string '') downloads every file; a value such as '102'
+                downloads only the matching entries in grasp_listing.txt.
+
+        # Returns
+
+            list of paths to the downloaded files
+
+        '''
+        dataset = self._update_dataset_param(dataset)
+        if data_dir is None:
+            if self.data_dir is None:
+                data_dir = FLAGS.data_dir
+            else:
+                data_dir = self.data_dir
+        mkdir_p(data_dir)
+        print('Downloading datasets to: ', data_dir)
+
+        url_prefix = ''
+        # If a hashed version of the listing is available,
+        # download the dataset and verify hashes to prevent data corruption.
+        listing_hash = os.path.join(data_dir, 'grasp_listing_hash.txt')
+        if os.path.isfile(listing_hash):
+            files_and_hashes = np.genfromtxt(listing_hash, dtype='str', delimiter=' ')
+            # an empty dataset string matches every file in the listing
+            files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, file_hash=hash_str, extract=True)
+                     for fpath, hash_str in tqdm(files_and_hashes)
+                     if not dataset or '_' + str(dataset) in fpath]
+        else:
+            # If a hashed version of the listing is not available,
+            # simply download the dataset normally.
+            listing_url = 'https://raw.githubusercontent.com/ahundt/robot-grasp-detection/master/grasp_listing.txt'
+            grasp_listing_path = get_file('grasp_listing.txt', listing_url, cache_subdir=data_dir)
+            grasp_files = np.genfromtxt(grasp_listing_path, dtype=str)
+            files = [get_file(fpath.split('/')[-1], url_prefix + fpath, cache_subdir=data_dir, extract=True)
+                     for fpath in tqdm(grasp_files)
+                     if not dataset or '_' + dataset in fpath]
+
+            # Once every file is downloaded, generate a hashed listing.
+            # grasp_files is only defined in this branch, so the hashed
+            # listing is only (re)generated after a plain download.
+            if dataset == 'all' or dataset == '':
+                print('Hashing all dataset files to prevent corruption...')
+                hashes = []
+                for i, f in enumerate(tqdm(files)):
+                    hashes.append(_hash_file(f))
+                file_hash_np = np.column_stack([grasp_files, hashes])
+                with open(listing_hash, 'wb') as hash_file:
+                    np.savetxt(hash_file, file_hash_np, fmt='%s', delimiter=' ', header='file_path sha256')
+                print('Hashing complete, {} contains each url plus hash, and will be used to verify the '
+                      'dataset during future calls to download().'.format(listing_hash))
+
+        return files
+
+    def _update_dataset_param(self, dataset):
+        """Internal function to configure which subset of the datasets is being used.
+        Helps to choose a reasonable default action based on previous user parameters.
+        """
+        if dataset is None and self.dataset is None:
+            return []
+        if dataset == 'all':
+            dataset = ''
+        if dataset is None and self.dataset is not None:
+            dataset = self.dataset
+        return dataset
+
 
 class ImageCoder(object):
     def __init__(self):
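In short, `GraspDataset` is a cached, hash-verified downloader. A minimal usage sketch, assuming the file is importable as `build_cgd_dataset` and the flag defaults are acceptable for your setup:

```python
from build_cgd_dataset import GraspDataset

# Construct without downloading, then fetch explicitly; archives land in
# ~/.keras/datasets/cornell_grasping, and sha256 hashes are checked
# whenever grasp_listing_hash.txt already exists there.
gd = GraspDataset(download=False)
files = gd.download()  # list of local paths to the downloaded archives
print('%d files in %s' % (len(files), gd.data_dir))
```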
@@ -31,12 +31,12 @@ def _process_image(filename, coder):
     assert len(image.shape) == 3
     height = image.shape[0]
     width = image.shape[1]
-    assert image.shape[2] == 3 
+    assert image.shape[2] == 3
     return image_data, height, width
 
 
 def _process_bboxes(name):
-    '''Create a list with the coordinates of the grasping rectangles. 
+    '''Create a list with the coordinates of the grasping rectangles.
     Every element is either x or y of a vertex.'''
     with open(name, 'r') as f:
         bboxes = list(map(
@@ -65,33 +240,36 @@ def _convert_to_example(filename, bboxes, image_buffer, height, width):
         'image/width': _int64_feature(width),
         'bboxes': _floats_feature(bboxes)}))
     return example
-    
+
 
 def main():
-    
-    train_file = os.path.join(dataset, 'train-cgd')
-    validation_file = os.path.join(dataset, 'validation-cgd')
+    # construct the dataset helper without downloading, then
+    # download only when requested via --grasp_download
+    gd = GraspDataset(download=False)
+    if FLAGS.grasp_download:
+        gd.download(dataset=FLAGS.grasp_dataset)
+    train_file = os.path.join(FLAGS.data_dir, 'train-cgd')
+    validation_file = os.path.join(FLAGS.data_dir, 'validation-cgd')
     print(train_file)
     print(validation_file)
     writer_train = tf.python_io.TFRecordWriter(train_file)
     writer_validation = tf.python_io.TFRecordWriter(validation_file)
-    
+
     # Creating a list with all the image paths
     folders = range(1,11)
     folders = ['0'+str(i) if i<10 else '10' for i in folders]
     filenames = []
     for i in folders:
-        for name in glob.glob(os.path.join(dataset, i, 'pcd'+i+'*r.png')):
+        for name in glob.glob(os.path.join(FLAGS.data_dir, i, 'pcd'+i+'*r.png')):
             filenames.append(name)
-    
+
     # Shuffle the list of image paths
    np.random.shuffle(filenames)
-    
+
     count = 0
     valid_img = 0
     train_img = 0
     coder = ImageCoder()
-    for filename in filenames:
+    for filename in tqdm(filenames):
         bbox = filename[:-5]+'cpos.txt'
         bboxes = _process_bboxes(bbox)
         image_buffer, height, width = _process_image(filename, coder)
@@ -104,9 +282,9 @@ def main():
             writer_train.write(example.SerializeToString())
             train_img +=1
         count +=1
-    
+
     print('Done converting %d images in TFRecords with %d train images and %d validation images' % (count, train_img, valid_img))
-    
+
     writer_train.close()
     writer_validation.close()
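After the conversion finishes, it is worth reading one record back to confirm the writers produced what the training code expects. A small sanity-check sketch, using the same feature keys that `_convert_to_example` writes (the path assumes the default `--data_dir`):

```python
import os
import tensorflow as tf

record_path = os.path.join(os.path.expanduser('~'),
                           '.keras', 'datasets', 'cornell_grasping', 'train-cgd')
for record in tf.python_io.tf_record_iterator(record_path):
    example = tf.train.Example()
    example.ParseFromString(record)
    feat = example.features.feature
    height = feat['image/height'].int64_list.value[0]
    width = feat['image/width'].int64_list.value[0]
    n_coords = len(feat['bboxes'].float_list.value)
    # each rectangle is 4 vertices x 2 coordinates = 8 floats
    print('%dx%d image, %d rectangles' % (width, height, n_coords // 8))
    break  # inspect only the first record
```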
diff --git a/grasp_det.py b/grasp_det.py
index f990041..ab9c305 100755
--- a/grasp_det.py
+++ b/grasp_det.py
@@ -1,5 +1,5 @@
 #!/usr/local/bin/python
-''' 
+'''
 Training a network on the Cornell grasping dataset for detecting grasping positions.
 '''
 import sys
@@ -12,9 +12,52 @@ import grasp_img_proc
 from grasp_inf import inference
 import time
 
+from tensorflow.python.platform import flags
+
+
+flags.DEFINE_string('data_dir',
+                    os.path.join(os.path.expanduser("~"),
+                                 '.keras', 'datasets', 'cornell_grasping'),
+                    """Path to dataset in TFRecord format
+                    (aka Example protobufs) and feature csv files.""")
+flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
+flags.DEFINE_boolean('grasp_download', False,
+                     """Download the grasp_dataset to data_dir if it is not already present.""")
+
+flags.DEFINE_float(
+    'learning_rate',
+    0.001,
+    'Initial learning rate.'
+)
+flags.DEFINE_integer(
+    'num_epochs',
+    None,
+    'Number of epochs to run trainer.'
+)
+flags.DEFINE_integer(
+    'batch_size',
+    64,
+    'Batch size.'
+)
+flags.DEFINE_string(
+    'log_dir',
+    '/tmp/tf',
+    'Tensorboard log_dir.'
+)
+flags.DEFINE_string(
+    'model_path',
+    '/tmp/tf/model.ckpt',
+    'Variables for the model.'
+)
+flags.DEFINE_string(
+    'train_or_validation',
+    'validation',
+    'Whether to train on or evaluate the dataset.'
+)
 
-TRAIN_FILE = '/root/dataset/cornell_grasping_dataset/train-cgd'
-VALIDATE_FILE = '/root/dataset/cornell_grasping_dataset/validation-cgd'
+FLAGS = flags.FLAGS
+TRAIN_FILE = os.path.join(FLAGS.data_dir, 'train-cgd')
+VALIDATE_FILE = os.path.join(FLAGS.data_dir, 'validation-cgd')
 
 def bboxes_to_grasps(bboxes):
     # converting and scaling bounding boxes into grasps, g = {x, y, tan, h, w}
@@ -45,10 +88,10 @@ def run_training():
     print('inputs')
     data_files_ = VALIDATE_FILE
     images, bboxes = grasp_img_proc.inputs([data_files_])
-    
+
     x, y, tan, h, w = bboxes_to_grasps(bboxes)
     x_hat, y_hat, tan_hat, h_hat, w_hat = tf.unstack(inference(images), axis=1) # list
-    # tangent of 85 degree is 11 
+    # tangent of 85 degrees is about 11
     tan_hat_confined = tf.minimum(11., tf.maximum(-11., tan_hat))
     tan_confined = tf.minimum(11., tf.maximum(-11., tan))
     # Loss function
@@ -65,7 +108,7 @@ def run_training():
     l = ['w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1', 'b_fc1', 'w_fc2', 'b_fc2']
     for i in l:
         d[i] = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if v.name == i+':0'][0]
-    
+
     dg={}
     lg = ['w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5', 'w_fc1', 'b_fc1', 'w_fc2', 'b_fc2', 'w_output', 'b_output']
     for i in lg:
@@ -85,7 +128,7 @@ def run_training():
             if FLAGS.train_or_validation == 'train':
                 _, loss_value, x_value, x_model, tan_value, tan_model, h_value, h_model, w_value, w_model = sess.run([train_op, loss, x, x_hat, tan, tan_hat, h, h_hat, w, w_hat])
                 duration = time.time() - start_batch
-                if step % 100 == 0: 
+                if step % 100 == 0:
                     print('Step %d | loss = %s\n | x = %s\n | x_hat = %s\n | tan = %s\n | tan_hat = %s\n | h = %s\n | h_hat = %s\n | w = %s\n | w_hat = %s\n | (%.3f sec/batch\n' % (step, loss_value, x_value[:3], x_model[:3], tan_value[:3], tan_model[:3], h_value[:3], h_model[:3], w_value[:3], w_model[:3], duration))
                 if step % 1000 == 0:
                     saver_g.save(sess, FLAGS.model_path)
@@ -93,10 +136,10 @@ def run_training():
                 bbox_hat = grasp_to_bbox(x_hat, y_hat, tan_hat, h_hat, w_hat)
                 bbox_value, bbox_model, tan_value, tan_model = sess.run([bboxes, bbox_hat, tan, tan_hat])
                 bbox_value = np.reshape(bbox_value, -1)
-                bbox_value = [(bbox_value[0]*0.35,bbox_value[1]*0.47),(bbox_value[2]*0.35,bbox_value[3]*0.47),(bbox_value[4]*0.35,bbox_value[5]*0.47),(bbox_value[6]*0.35,bbox_value[7]*0.47)] 
+                bbox_value = [(bbox_value[0]*0.35, bbox_value[1]*0.47), (bbox_value[2]*0.35, bbox_value[3]*0.47), (bbox_value[4]*0.35, bbox_value[5]*0.47), (bbox_value[6]*0.35, bbox_value[7]*0.47)]
                 p1 = Polygon(bbox_value)
                 p2 = Polygon(bbox_model)
-                iou = p1.intersection(p2).area / (p1.area +p2.area -p1.intersection(p2).area) 
+                iou = p1.intersection(p2).area / (p1.area + p2.area - p1.intersection(p2).area)
                 angle_diff = np.abs(np.arctan(tan_model)*180/np.pi -np.arctan(tan_value)*180/np.pi)
                 duration = time.time() -start_batch
                 if angle_diff < 30. and iou >= 0.25:
@@ -113,50 +156,8 @@ def run_training():
 
 def main(_):
     run_training()
-    
+
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--learning_rate',
-        type=float,
-        default=0.001,
-        help='Initial learning rate.'
-    )
-    parser.add_argument(
-        '--data_dir',
-        type=str,
-        default='/root/imagenet-data',
-        help='Directory with training data.'
-    )
-    parser.add_argument(
-        '--num_epochs',
-        type=int,
-        default=None,
-        help='Number of epochs to run trainer.'
-    )
-    parser.add_argument(
-        '--batch_size',
-        type=int,
-        default=64,
-        help='Batch size.'
-    )
-    parser.add_argument(
-        '--log_dir',
-        type=str,
-        default='/tmp/tf',
-        help='Tensorboard log_dir.'
-    )
-    parser.add_argument(
-        '--model_path',
-        type=str,
-        default='/tmp/tf/model.ckpt',
-        help='Variables for the model.'
-    )
-    parser.add_argument(
-        '--train_or_validation',
-        type=str,
-        default='validation',
-        help='Train or evaluate the dataset'
-    )
-    FLAGS, unparsed = parser.parse_known_args()
-    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
+    FLAGS._parse_flags()
+    tf.app.run(main=main)
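The validation branch above implements the usual rectangle metric: a predicted grasp counts as correct when the Jaccard index (IoU) with a ground-truth rectangle is at least 0.25 and the orientations differ by less than 30 degrees. The same rule, pulled out into a standalone sketch (the function name is hypothetical):

```python
import numpy as np
from shapely.geometry import Polygon

def grasp_correct(rect_true, rect_pred, tan_true, tan_pred):
    """Rectangle metric: IoU >= 0.25 and angle difference < 30 degrees."""
    p1, p2 = Polygon(rect_true), Polygon(rect_pred)
    inter = p1.intersection(p2).area
    iou = inter / (p1.area + p2.area - inter)
    angle_diff = np.abs(np.degrees(np.arctan(tan_pred)) -
                        np.degrees(np.arctan(tan_true)))
    return iou >= 0.25 and angle_diff < 30.

rect = [(0, 0), (1, 0), (1, 2), (0, 2)]
shifted = [(0.2, 0.1), (1.2, 0.1), (1.2, 2.1), (0.2, 2.1)]
print(grasp_correct(rect, shifted, 0.0, 0.1))  # True: IoU ~0.61, ~5.7 degrees
```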
diff --git a/grasp_inf.py b/grasp_inf.py
index 4c0b14e..8d69ec9 100644
--- a/grasp_inf.py
+++ b/grasp_inf.py
@@ -5,20 +5,20 @@
 FLAGS = tf.app.flags.FLAGS
 
 tf.app.flags.DEFINE_bool('trainable', True,
-                         """Computes or not gradients for learning.""") 
+                         """Whether to compute gradients for learning.""")
 
 def conv2d_s2(x, W):
     return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')
 
 def conv2d_s1(x, W):
     return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
-    
+
 def max_pool_2x2(x):
     return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
 
 def inference(images):
     if FLAGS.trainable:
-        keep_prob = 1. 
+        keep_prob = 1.  # NOTE: dropout is effectively disabled in both branches
     else:
         keep_prob = 1.
     print('keep_prob = %.1f' %keep_prob)
@@ -27,7 +27,7 @@ def inference(images):
     b1 = tf.get_variable('b1', initializer=tf.constant(0.1, shape=[64]), trainable=FLAGS.trainable)
     h1 = tf.nn.relu(conv2d_s2(images, w1)+b1)
     h1_pool = max_pool_2x2(h1)
-    
+
     w2 = tf.get_variable('w2', [3,3,64,128], trainable=FLAGS.trainable)
     b2 = tf.get_variable('b2', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h2 = tf.nn.relu(conv2d_s2(h1_pool,w2)+b2)
@@ -36,7 +36,7 @@ def inference(images):
     w3 = tf.get_variable('w3', [3,3,128,128], trainable=FLAGS.trainable)
     b3 = tf.get_variable('b3', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h3 = tf.nn.relu(conv2d_s1(h2_pool,w3)+b3)
-    
+
     w4 = tf.get_variable('w4', [3,3,128,128], trainable=FLAGS.trainable)
     b4 = tf.get_variable('b4', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h4 = tf.nn.relu(conv2d_s1(h3,w4)+b4)
@@ -45,7 +45,7 @@ def inference(images):
     b5 = tf.get_variable('b5', initializer=tf.constant(0.1, shape=[256]), trainable=FLAGS.trainable)
     h5 = tf.nn.relu(conv2d_s1(h4,w5)+b5)
     h5_pool = max_pool_2x2(h5)
-    
+
     w_fc1 = tf.get_variable('w_fc1', [7*7*256,512], trainable=FLAGS.trainable)
     b_fc1 = tf.get_variable('b_fc1', initializer=tf.constant(0.1, shape=[512]), trainable=FLAGS.trainable)
     h5_flat = tf.reshape(h5_pool, [-1, 7*7*256])
@@ -60,5 +60,5 @@ def inference(images):
     w_output = tf.get_variable('w_output', [512, 5], trainable=FLAGS.trainable)
     b_output = tf.get_variable('b_output', initializer=tf.constant(0.1, shape=[5]), trainable=FLAGS.trainable)
     output = tf.matmul(h_fc2_dropout, w_output)+b_output
-    
+
     return output
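A detail worth knowing before feeding images to `inference`: the `w_fc1` shape of `[7*7*256, 512]` hard-codes the assumption that the convolutional stack reduces the input to 7x7. With SAME padding, a stride-s layer produces ceil(n/s) outputs, so only 224x224 inputs satisfy the reshape; a quick check of the arithmetic:

```python
# Spatial-size bookkeeping for the inference() graph above: the stack
# conv_s2, pool, conv_s2, pool, three conv_s1 layers, pool takes
# 224 -> 7, matching the 7*7*256 flatten expected by w_fc1.
from math import ceil

def same_out(n, stride):
    return int(ceil(float(n) / stride))

n = 224
for name, stride in [('conv1/s2', 2), ('pool1', 2), ('conv2/s2', 2),
                     ('pool2', 2), ('conv3/s1', 1), ('conv4/s1', 1),
                     ('conv5/s1', 1), ('pool5', 2)]:
    n = same_out(n, stride)
    print('%-8s -> %dx%d' % (name, n, n))
assert n == 7  # any other input size would break the reshape
```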
+http://pr.cs.cornell.edu/grasping/rect_data/temp/data09.tar.gz
+http://pr.cs.cornell.edu/grasping/rect_data/temp/data10.tar.gz
+http://pr.cs.cornell.edu/grasping/rect_data/backgrounds.zip
+http://pr.cs.cornell.edu/grasping/rect_data/processedData.zip
\ No newline at end of file
diff --git a/inference.py b/inference.py
index 60ef3ed..bb0df50 100644
--- a/inference.py
+++ b/inference.py
@@ -5,14 +5,14 @@
 FLAGS = tf.app.flags.FLAGS
 
 tf.app.flags.DEFINE_bool('trainable', False,
-                         """Computes or not gradients for learning.""") 
+                         """Whether to compute gradients for learning.""")
 
 def conv2d_s2(x, W):
     return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')
 
 def conv2d_s1(x, W):
     return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
-    
+
 def max_pool_2x2(x):
     return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
 
@@ -27,7 +27,7 @@ def inference(images):
     b1 = tf.get_variable('b1', initializer=tf.constant(0.1, shape=[64]), trainable=FLAGS.trainable)
     h1 = tf.nn.relu(conv2d_s2(images, w1)+b1)
     h1_pool = max_pool_2x2(h1)
-    
+
     w2 = tf.get_variable('w2', [3,3,64,128], trainable=FLAGS.trainable)
     b2 = tf.get_variable('b2', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h2 = tf.nn.relu(conv2d_s2(h1_pool,w2)+b2)
@@ -36,7 +36,7 @@ def inference(images):
     w3 = tf.get_variable('w3', [3,3,128,128], trainable=FLAGS.trainable)
     b3 = tf.get_variable('b3', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h3 = tf.nn.relu(conv2d_s1(h2_pool,w3)+b3)
-    
+
     w4 = tf.get_variable('w4', [3,3,128,128], trainable=FLAGS.trainable)
     b4 = tf.get_variable('b4', initializer=tf.constant(0.1, shape=[128]), trainable=FLAGS.trainable)
     h4 = tf.nn.relu(conv2d_s1(h3,w4)+b4)
@@ -45,20 +45,20 @@ def inference(images):
     b5 = tf.get_variable('b5', initializer=tf.constant(0.1, shape=[256]), trainable=FLAGS.trainable)
     h5 = tf.nn.relu(conv2d_s1(h4,w5)+b5)
     h5_pool = max_pool_2x2(h5)
-    
+
     w_fc1 = tf.get_variable('w_fc1', [7*7*256,512], trainable=FLAGS.trainable)
     b_fc1 = tf.get_variable('b_fc1', initializer=tf.constant(0.1, shape=[512]), trainable=FLAGS.trainable)
     h5_flat = tf.reshape(h5_pool, [-1, 7*7*256])
     h_fc1 = tf.nn.relu(tf.matmul(h5_flat,w_fc1)+b_fc1)
     h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)
-    
+
     w_fc2 = tf.get_variable('w_fc2', [512,512], trainable=FLAGS.trainable)
     b_fc2 = tf.get_variable('b_fc2', initializer=tf.constant(0.1, shape=[512]), trainable=FLAGS.trainable)
     h_fc2 = tf.nn.relu(tf.matmul(h_fc1_dropout, w_fc2)+b_fc2)
     h_fc2_dropout = tf.nn.dropout(h_fc2, keep_prob)
-    
+
     w_output = tf.get_variable('w_output', [512, 1000], trainable=FLAGS.trainable)
     b_output = tf.get_variable('b_output', [1000])
     output = tf.matmul(h_fc2_dropout,w_output)+b_output
-    
+
     return output
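For reference, this is how the two networks fit together during transfer learning: `inference.py` ends in a 1000-way ImageNet classifier, while `grasp_inf.py` reuses every layer up to `w_fc2`/`b_fc2` and swaps in a 5-unit grasp output. Below is a sketch of the two-saver pattern from `run_training` in `grasp_det.py`; the checkpoint paths are the defaults from the README, and the graph must be built before the savers are created:

```python
import tensorflow as tf
from grasp_inf import inference

# Build the grasp graph first so the variables referenced below exist.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = inference(images)

# Layers shared with the ImageNet classifier; w_output/b_output are
# excluded on purpose because their shapes differ ([512, 5] vs [512, 1000]).
shared = ['w1', 'b1', 'w2', 'b2', 'w3', 'b3', 'w4', 'b4', 'w5', 'b5',
          'w_fc1', 'b_fc1', 'w_fc2', 'b_fc2']
all_vars = {v.name.split(':')[0]: v
            for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)}

saver = tf.train.Saver({name: all_vars[name] for name in shared})
saver_g = tf.train.Saver(all_vars)  # full grasp model, new head included

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, './models/imagenet/m1/m1.ckpt')  # pretrained weights
    # ... run training steps ..., then periodically:
    saver_g.save(sess, '/tmp/tf/model.ckpt')
```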