-
Notifications
You must be signed in to change notification settings - Fork 1
/
inference.py
38 lines (29 loc) · 1017 Bytes
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from settings_inference import (
MODEL_CONFIG_PATH,
MODEL_WEIGHT_PATH,
TOKENIZER_PATH,
DATE_TO_INFERENCE
)
from utils import get_inference_model, generate_caption
import json
import tensorflow as tf
import argparse
def main():
    """Generate and print a caption for the image given via --path.

    Loads the saved tokenizer model and the trained captioning model from
    the paths configured in ``settings_inference``, then runs
    ``generate_caption`` on the requested image and prints the result.
    """
    # Parse CLI args first so a missing --path fails fast, before the
    # expensive tokenizer/model loading below. `required=True` prevents
    # silently passing None down into generate_caption.
    parser = argparse.ArgumentParser(description="Image Captioning")
    parser.add_argument("--path", required=True, help="Path to image file.")
    image_path = parser.parse_args().path

    print("\n\nINFERENCE: {}\n\n".format(DATE_TO_INFERENCE))

    # Get tokenizer layer from disk. NOTE(review): the saved model is
    # assumed to wrap the tokenizer as its layer at index 1 — confirm
    # against how TOKENIZER_PATH was exported.
    tokenizer = tf.keras.models.load_model(TOKENIZER_PATH)
    tokenizer = tokenizer.layers[1]

    # Build the inference model from its config, then restore weights.
    model = get_inference_model(MODEL_CONFIG_PATH)
    model.load_weights(MODEL_WEIGHT_PATH)

    # SEQ_LENGTH from the model config bounds the generated caption length.
    with open(MODEL_CONFIG_PATH) as json_file:
        model_config = json.load(json_file)

    text_caption = generate_caption(
        image_path, model, tokenizer, model_config["SEQ_LENGTH"]
    )
    print("\n\nPREDICTING_WITH_MODEL: ", MODEL_WEIGHT_PATH)
    print("PREDICT CAPTION : %s" % (text_caption))


if __name__ == "__main__":
    # Guard so importing this module no longer triggers model loading
    # and argument parsing as side effects.
    main()