Commit
Neural network
Kevin Huestis committed Mar 16, 2021
1 parent 82e180d commit e275078
Showing 1 changed file with 272 additions and 0 deletions.
272 changes: 272 additions & 0 deletions neuronales_netz_beispiel.ipynb
@@ -0,0 +1,272 @@
{
"metadata": {
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": 3
},
"orig_nbformat": 2
},
"nbformat": 4,
"nbformat_minor": 2,
"cells": [
{
"source": [
"# Hundeklassifikation mit Neuronalen Netzen\n",
"Der Code ist etwas angepasst von diesem Keras Tutorial:\n",
"https://keras.io/examples/vision/image_classification_from_scratch/\n",
"\n",
"Daten: https://www.kaggle.com/c/dogs-vs-cats/data\n",
"\n",
"Damit der Code so funktioniert, sollen die Daten in einer Order \"data/\" rein. Darunter folgende Struktur:\n",
" * data\n",
" - Cat\n",
" - Dog\n",
"\n",
"Und jeweils alle Hundedaten und alle Katzendaten.\n",
"\n",
"Das Vorgehen (ohne richtiges Validieren oder Testen) sollte in richtigen Anwendungen nicht gefolgt werden. Ein besseres Vorgehen wird z.B. hier gut beschrieben: http://karpathy.github.io/2019/04/25/recipe/"
],
"cell_type": "markdown",
"metadata": {}
},
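{
"source": [
"Optional sketch (not part of the original tutorial): a quick check that the folder layout described above is in place. It only assumes the data/Cat and data/Dog directories and counts the files in each."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Check the expected folder structure (optional sketch; assumes the data/Cat and data/Dog layout)\n",
"import os\n",
"\n",
"for class_name in [\"Cat\", \"Dog\"]:\n",
"    class_dir = os.path.join(\"data\", class_name)\n",
"    n_images = len(os.listdir(class_dir)) if os.path.isdir(class_dir) else 0\n",
"    print(f\"{class_name}: {n_images} images\")"
]
},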
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Generate Dataset\n",
"image_size = (180, 180)\n",
"batch_size = 32\n",
"\n",
"train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n",
" \"data\",\n",
" validation_split=0.2,\n",
" subset=\"training\",\n",
" seed=1337,\n",
" image_size=image_size,\n",
" batch_size=batch_size,\n",
")\n",
"val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n",
" \"data\",\n",
" validation_split=0.2,\n",
" subset=\"validation\",\n",
" seed=1337,\n",
" image_size=image_size,\n",
" ba"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Look at dataset\n",
"plt.figure(figsize=(10, 10))\n",
"for images, labels in train_ds.take(1):\n",
" for i in range(9):\n",
" ax = plt.subplot(3, 3, i + 1)\n",
" plt.imshow(images[i].numpy().astype(\"uint8\"))\n",
" plt.title(int(labels[i]))\n",
" plt.axis(\"off\")\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Augment data\n",
"data_augmentation = keras.Sequential(\n",
" [\n",
" layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n",
" layers.experimental.preprocessing.RandomRotation(0.1),\n",
" ]\n",
")\n",
"\n",
"plt.figure(figsize=(10, 10))\n",
"for images, _ in train_ds.take(1):\n",
" for i in range(9):\n",
" augmented_images = data_augmentation(images)\n",
" ax = plt.subplot(3, 3, i + 1)\n",
" plt.imshow(augmented_images[0].numpy().astype(\"uint8\"))\n",
" plt.axis(\"off\")\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Prepare data\n",
"augmented_train_ds = train_ds.map(\n",
" lambda x, y: (data_augmentation(x, training=True), y))\n",
"\n",
"train_ds = train_ds.prefetch(buffer_size=32)\n",
"val_ds = val_ds.prefetch(buffer_size=32)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Model\n",
"def make_model(input_shape, num_classes):\n",
" inputs = keras.Input(shape=input_shape)\n",
" # Image augmentation block\n",
" x = data_augmentation(inputs)\n",
"\n",
" # Entry block\n",
" x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)\n",
" x = layers.Conv2D(32, 3, strides=2, padding=\"same\")(x)\n",
" x = layers.BatchNormalization()(x)\n",
" x = layers.Activation(\"relu\")(x)\n",
"\n",
" x = layers.Conv2D(64, 3, padding=\"same\")(x)\n",
" x = layers.BatchNormalization()(x)\n",
" x = layers.Activation(\"relu\")(x)\n",
"\n",
" previous_block_activation = x # Set aside residual\n",
"\n",
" for size in [128, 256, 512, 728]:\n",
" x = layers.Activation(\"relu\")(x)\n",
" x = layers.SeparableConv2D(size, 3, padding=\"same\")(x)\n",
" x = layers.BatchNormalization()(x)\n",
"\n",
" x = layers.Activation(\"relu\")(x)\n",
" x = layers.SeparableConv2D(size, 3, padding=\"same\")(x)\n",
" x = layers.BatchNormalization()(x)\n",
"\n",
" x = layers.MaxPooling2D(3, strides=2, padding=\"same\")(x)\n",
"\n",
" # Project residual\n",
" residual = layers.Conv2D(size, 1, strides=2, padding=\"same\")(\n",
" previous_block_activation\n",
" )\n",
" x = layers.add([x, residual]) # Add back residual\n",
" previous_block_activation = x # Set aside next residual\n",
"\n",
" x = layers.SeparableConv2D(1024, 3, padding=\"same\")(x)\n",
" x = layers.BatchNormalization()(x)\n",
" x = layers.Activation(\"relu\")(x)\n",
"\n",
" x = layers.GlobalAveragePooling2D()(x)\n",
" if num_classes == 2:\n",
" activation = \"sigmoid\"\n",
" units = 1\n",
" else:\n",
" activation = \"softmax\"\n",
" units = num_classes\n",
"\n",
" x = layers.Dropout(0.5)(x)\n",
" outputs = layers.Dense(units, activation=activation)(x)\n",
" return keras.Model(inputs, outputs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = make_model(input_shape=image_size + (3,), num_classes=2)\n",
"# keras.utils.plot_model(model, show_shapes=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Train\n",
"epochs = 50\n",
"\n",
"callbacks = [\n",
" keras.callbacks.ModelCheckpoint(\"save_at_{epoch}.h5\"),\n",
"]\n",
"model.compile(\n",
" optimizer=keras.optimizers.Adam(1e-3),\n",
" loss=\"binary_crossentropy\",\n",
" metrics=[\"accuracy\"],\n",
")\n",
"history = model.fit(\n",
" train_ds, epochs=epochs, callbacks=callbacks, validation_data=val_ds,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize training\n",
"plt.plot(history.history['accuracy'])\n",
"plt.plot(history.history['val_accuracy'])\n",
"plt.title('model accuracy')\n",
"plt.ylabel('accuracy')\n",
"plt.xlabel('epoch')\n",
"plt.legend(['train', 'validation'])\n",
"plt.show()\n",
"\n",
"plt.plot(history.history['loss'])\n",
"plt.plot(history.history['val_loss'])\n",
"plt.title('model loss')\n",
"plt.ylabel('loss')\n",
"plt.xlabel('epoch')\n",
"plt.legend(['train', 'validation'])\n",
"plt.show()"
]
},
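{
"source": [
"A minimal sketch of the evaluation step mentioned at the top (not part of the original tutorial): evaluating the trained model once on a held-out test set. The folder \"data_test/\" is an assumed, hypothetical directory with the same Cat/Dog layout as \"data/\"."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Evaluate on a held-out test set (sketch; \"data_test/\" is an assumed folder)\n",
"test_ds = tf.keras.preprocessing.image_dataset_from_directory(\n",
"    \"data_test\",\n",
"    image_size=image_size,\n",
"    batch_size=batch_size,\n",
")\n",
"test_loss, test_acc = model.evaluate(test_ds)\n",
"print(f\"Test accuracy: {test_acc:.3f}\")"
]
},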
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Inference\n",
"img = keras.preprocessing.image.load_img(\n",
" \"data/Cat/cat.12323.jpg\", target_size=image_size\n",
")\n",
"img_array = keras.preprocessing.image.img_to_array(img)\n",
"img_array = tf.expand_dims(img_array, 0) # Create batch axis\n",
"\n",
"predictions = model.predict(img_array)\n",
"score = predictions[0]\n",
"print(\n",
" \"This image is %.2f percent cat and %.2f percent dog.\"\n",
" % (100 * (1 - score), 100 * score)\n",
")"
]
}
]
}
