Commit 4af47fd9 authored by Glydzo

Last modifications.

parent 5baa5143
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
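A quick usage sketch (not part of the original file) showing how these ANSI escape constants are typically wrapped around a message, always resetting with ENDC afterwards:

print(bcolors.WARNING + 'GPU not found, falling back to CPU.' + bcolors.ENDC)
print(bcolors.BOLD + bcolors.OKGREEN + 'Model loaded!' + bcolors.ENDC)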
@@ -10,7 +10,7 @@ import os
# 2 = INFO and WARNING messages are not printed
# 3 = INFO, WARNING, and ERROR messages are not printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
import tensorflow as tf
import numpy as np
@@ -18,7 +18,7 @@ from tqdm import tqdm
from utils import best_loss, magic_accuracy
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
gpus = tf.config.experimental.list_physical_devices('GPU')
@@ -34,13 +34,13 @@ model = tf.keras.models.load_model(models_path + 'model_tech_db_filtered2020-05-
#model.summary()
image = np.array([np.load('../res/dataset/1000008.npy')])
image = np.array([np.load('../res/dataset/1000008.npy')], dtype=np.float32)
qp = np.array([22])
qp = np.array([22], dtype=np.float32)
elapsed_times = []
for i in tqdm(range(10000)):
for i in tqdm(range(1000)):
start_time = time.time()
@@ -52,3 +52,7 @@ for i in tqdm(range(10000)):
elapsed_times.append((end_time - start_time) * 1000)
print('Inferences finished ! Dimensions : ', prediction.shape, ' / Average execution time : %.3f ms ' % (sum(elapsed_times)/len(elapsed_times)), sep="")
models_path = '../res/models/'
weights_path = '../res/weights/'
dataset_path = '../res/dataset/'
nb_ctus_fullhd = 510
nb_ctus_4k = 2176
nb_max_images_to_process = 10
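The two CTU counts are consistent with 64x64 CTUs: a 1920x1080 frame needs 30x17 = 510 of them, and 2176 matches a DCI-4K 4096x2160 frame at 64x34 (a 3840x2160 frame would give 2040). A small sanity-check sketch under that 64x64 assumption:

import math

def nb_ctus(width, height, ctu_size=64):
    # Number of CTUs needed to cover a frame, rounding partial CTUs up.
    return math.ceil(width / ctu_size) * math.ceil(height / ctu_size)

print(nb_ctus(1920, 1080))  # 30 * 17 = 510  -> nb_ctus_fullhd
print(nb_ctus(4096, 2160))  # 64 * 34 = 2176 -> nb_ctus_4k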
import time
import os
@@ -12,12 +17,16 @@ import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
from CNN import model_technicolor_vector_multi_qp
from cnn import model_technicolor_vector_multi_qp
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
from bcolors import bcolors
print(bcolors.BOLD + '\nInitialization...\n' + bcolors.ENDC)
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
print(bcolors.OKGREEN + 'Num GPUs Available: ' + str(len(tf.config.experimental.list_physical_devices('GPU'))) + bcolors.ENDC)
gpus = tf.config.experimental.list_physical_devices('GPU')
@@ -28,31 +37,76 @@ if gpus:
except RuntimeError as e:
print(e)
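The lines elided by this hunk presumably hold TensorFlow's usual per-GPU memory-growth setup, which the visible try/except wraps; a minimal sketch of that standard pattern (not the author's exact code):

if gpus:
    try:
        # Allocate GPU memory on demand instead of reserving it all up front.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # Memory growth must be set before the GPUs have been initialized.
        print(e)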
with tf.device('/GPU:0'):
model = model_technicolor_vector_multi_qp()
#model = tf.saved_model.load(models_path + 'OptimizedModel/')
#infer = model.signatures["serving_default"]
print(bcolors.OKGREEN + 'Model created!' + bcolors.ENDC)
# Either load the weights from the h5 file, or load them from the native format using the expect_partial() method of load_weights(...)
#model.load_weights(weights_path + 'weights.h5')
#model.load_weights(weights_path + 'my_weights').expect_partial()
model = model_technicolor_vector_multi_qp()
print(bcolors.OKGREEN + 'Weights loaded!' + bcolors.ENDC)
# Either load the weights from the h5 file, or load them from the native format using the expect_partial() method of load_weights(...)
#model = tf.keras.models.load_model(models_path + 'model_tech_db_filtered2020-05-13.h5')
model.load_weights(weights_path + 'weights.h5')
#model.load_weights(weights_path + 'my_weights').expect_partial()
model.summary()
#tf.keras.models.save_model(model, models_path + 'SavedModel/')
#model.summary()
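As the comment above notes, the weights can come either from an HDF5 file or from TensorFlow's native checkpoint format; a minimal sketch (assuming the weights_path constant defined earlier) of producing both files from a trained model so that either loading path works:

# HDF5 file, loadable with model.load_weights(weights_path + 'weights.h5')
model.save_weights(weights_path + 'weights.h5')

# Native TF checkpoint, loadable with model.load_weights(weights_path + 'my_weights').expect_partial()
model.save_weights(weights_path + 'my_weights', save_format='tf')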
print(bcolors.BOLD + '\nData preparation...\n' + bcolors.ENDC)
image = np.array([np.load('../res/dataset/1000008.npy')])
image = []
qp = []
qp = np.array([22])
print(bcolors.WARNING + 'Shape of input CTUs : ' + str(np.load(dataset_path + '1000008.npy').shape) + bcolors.ENDC)
elapsed_times = []
for i in tqdm(range(nb_ctus_fullhd * nb_max_images_to_process)):
for i in tqdm(range(10000)):
#image.append(np.load(dataset_path + '1000008.npy'))
image.append(np.random.rand(68,68,1))
qp.append(22)
image = np.array(image)
qp = np.array(qp)
quantity_to_input = np.linspace(1, (nb_ctus_fullhd * nb_max_images_to_process), (nb_ctus_fullhd * nb_max_images_to_process) * 100)
print(bcolors.OKGREEN + 'Loading of ' + str(nb_ctus_fullhd * nb_max_images_to_process) + ' CTUs (' + str(nb_max_images_to_process) + ' images FullHD) finished!' + bcolors.ENDC)
elapsed_times = []
with tf.device('/GPU:0'):
print(bcolors.BOLD + '\nStart of inferences...\n' + bcolors.ENDC)
for i in range(len(quantity_to_input)):
print(bcolors.OKBLUE + 'Prediction ' + str(i+1) + ' : ' + str(round(quantity_to_input[i])) + ' CTUs at one time.' + bcolors.ENDC)
start_time = time.time()
prediction = model.predict([image,qp])
prediction = model.predict([image[:round(quantity_to_input[i])],qp[:round(quantity_to_input[i])]])
#x = [image[:round(quantity_to_input[i])],qp[:round(quantity_to_input[i])]]
#labeling = infer(x)
end_time = time.time()
elapsed_times.append((end_time - start_time) * 1000)
elapsed_times.append((end_time - start_time)*1000/round(quantity_to_input[i]))
#print(bcolors.OKGREEN,'Prediction finished ! Dimensions : ',prediction.shape,' / Execution time : %.3f ms'%((end_time - start_time)*1000/round(quantity_to_input[i])),sep="")
print(bcolors.OKGREEN,'Prediction finished ! / Execution time : %.3f ms'%((end_time - start_time)*1000/round(quantity_to_input[i])),sep="")
print(bcolors.ENDC)
print(bcolors.WARNING,'\nAll predictions are finished !\nAverage throughput : %.3f CTU/s or %.3f FPS (full HD)\n'%((1000/elapsed_times[len(elapsed_times)-1]),((1000/nb_ctus_fullhd)/elapsed_times[len(elapsed_times)-1])),sep="")
plt.plot(quantity_to_input, elapsed_times)
plt.title('Inference time as a function of the number of CTUs passed as input')
plt.xlabel('Nb of CTUs as input')
plt.ylabel('Execution time (in ms)')
plt.show()
print('Inferences finished ! Dimensions : ', prediction.shape, ' / Average execution time : %.3f ms ' % (sum(elapsed_times)/len(elapsed_times)), sep="")
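As a sanity check on the throughput formula above: the loop records milliseconds per single CTU, so CTU/s is 1000 divided by that figure, and Full-HD FPS divides again by the 510 CTUs per frame. A hypothetical worked example:

ms_per_ctu = 0.5                      # hypothetical last measurement, in ms per CTU
ctus_per_second = 1000 / ms_per_ctu   # 2000 CTU/s
fps_fullhd = ctus_per_second / 510    # about 3.9 Full-HD frames per second
print(ctus_per_second, fps_fullhd)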
models_path = '../res/models/'
dataset_path = '../res/dataset/'
from tensorflow.python.compiler.tensorrt import trt_convert as trt
input_saved_model_dir = models_path + 'SavedModel/'
output_saved_model_dir = models_path + 'OptimizedModel/'
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS
conversion_params = conversion_params._replace(max_workspace_size_bytes=(1<<32))
conversion_params = conversion_params._replace(precision_mode="FP32")
conversion_params = conversion_params._replace(maximum_cached_engines=100)
converter = trt.TrtGraphConverterV2(input_saved_model_dir=input_saved_model_dir) #,conversion_params=conversion_params)
graph_def = converter.convert()
converter.save(output_saved_model_dir)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
import tensorflow as tf
graph_def = tf.compat.v1.GraphDef()  # tf.GraphDef is a TF1 API; in TF2 it lives under tf.compat.v1
def my_input_fn():
image = np.array([np.load(dataset_path + '1000008.npy')]).astype(np.float32)
print(image.shape)
qp = np.array([22]).astype(np.float32)
Inp1 = np.random.normal(size=(1, 68, 68, 1)).astype(np.float32)
yield ([Inp1,qp])
converter.build(input_fn=my_input_fn)
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.framework import convert_to_constants

saved_model_loaded = tf.saved_model.load(output_saved_model_dir)
graph_func = saved_model_loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
frozen_func = convert_to_constants.convert_variables_to_constants_v2(graph_func)
output = frozen_func(input_data)[0].numpy()  # input_data is not defined in this snippet (see the sketch below)
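A minimal sketch of one way the missing input_data could be prepared, assuming the model's two inputs are a 68x68x1 CTU batch and a QP batch; the keyword names below are placeholders, the real ones come from the signature:

import numpy as np

# Inspect the signature to learn the expected input names, dtypes and shapes.
print(graph_func.structured_input_signature)

# Hypothetical input batch: one random 68x68x1 CTU and its QP value.
ctu_batch = tf.constant(np.random.rand(1, 68, 68, 1).astype(np.float32))
qp_batch = tf.constant(np.array([22], dtype=np.float32))

# Concrete functions are usually called with keyword arguments matching the
# signature's input names (substitute the names printed above).
# prediction = graph_func(input_1=ctu_batch, input_2=qp_batch)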