Add new sample for TensorFlow AMX BF16 Inference #1549

Merged
@@ -0,0 +1,130 @@
# Details about this Python script:
# - Enable auto-mixed precision with few code changes for faster inference.
# - Image classification task using TensorFlow Hub's ResNet50v1.5 pretrained model.
# - Export the optimized model in the SavedModel format.

# Importing libraries
import os
import sys
import time
import numpy as np
import PIL.Image as Image
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import requests
from copy import deepcopy
print("We are using TensorFlow version: ", tf.__version__)

# Check if hardware supports AMX

from cpuinfo import get_cpu_info
info = get_cpu_info()
flags = info['flags']
amx_supported = any("amx" in flag for flag in flags)
if amx_supported:
    print("AMX is supported on current hardware. Code sample can be run.\n")
else:
    sys.exit("AMX is not supported on current hardware. Code sample cannot be run.\n")


tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# Download and extract the TensorFlow Flower photos dataset
data_root = tf.keras.utils.get_file(
    'flower_photos',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    untar=True)

batch_size = 512
img_height = 224
img_width = 224

train_ds = tf.keras.utils.image_dataset_from_directory(
    str(data_root),
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)

val_ds = tf.keras.utils.image_dataset_from_directory(
    str(data_root),
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)

class_names = np.array(train_ds.class_names)
print("The flower dataset has " + str(len(class_names)) + " classes: ", class_names)

# Load the pretrained FP32 model
fp32_model = tf.keras.models.load_model('models/my_saved_model_fp32')

normalization_layer = tf.keras.layers.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))  # x: images, y: labels
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))  # x: images, y: labels

AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Measure FP32 inference time as the baseline
start = time.time()
fp32_history = fp32_model.evaluate(val_ds)
fp32_inference_time = time.time() - start



# Reload the model as the bf16 model with AVX512 to compare inference time.
# ONEDNN_MAX_CPU_ISA caps the highest instruction set oneDNN may dispatch to;
# AVX512_BF16 excludes AMX, so this run exercises AVX-512 only.
os.environ["ONEDNN_MAX_CPU_ISA"] = "AVX512_BF16"
tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16':True})
bf16_model_noAmx = tf.keras.models.load_model('models/my_saved_model_fp32')

bf16_model_noAmx_export_path = "models/my_saved_model_bf16_noAmx"
bf16_model_noAmx.save(bf16_model_noAmx_export_path)

start = time.time()
bf16_noAmx_history = bf16_model_noAmx.evaluate(val_ds)
bf16_noAmx_inference_time = time.time() - start


# Reload the model as the bf16 model with AMX to compare inference time
os.environ["ONEDNN_MAX_CPU_ISA"] = "AMX_BF16"
tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16':True})
bf16_model_withAmx = tf.keras.models.load_model('models/my_saved_model_fp32')

bf16_model_withAmx_export_path = "models/my_saved_model_bf16_with_amx"
bf16_model_withAmx.save(bf16_model_withAmx_export_path)

start = time.time()
bf16_withAmx_history = bf16_model_withAmx.evaluate(val_ds)
bf16_withAmx_inference_time = time.time() - start


# Summary of results
print("Summary")
print("FP32 inference time: %.3f" %fp32_inference_time)
print("BF16 with AVX512 inference time: %.3f" %bf16_noAmx_inference_time)
print("BF16 with AMX inference time: %.3f" %bf16_withAmx_inference_time)

import matplotlib.pyplot as plt
plt.figure()
plt.title("Resnet50 Inference Time")
plt.xlabel("Test Case")
plt.ylabel("Inference Time (seconds)")
plt.bar(["FP32", "BF16 with AVX512", "BF16 with AMX"], [fp32_inference_time, bf16_noAmx_inference_time, bf16_withAmx_inference_time])

# evaluate() returns [loss, accuracy]; index 1 is the accuracy
fp32_inference_accuracy = fp32_history[1]
bf16_noAmx_inference_accuracy = bf16_noAmx_history[1]
bf16_withAmx_inference_accuracy = bf16_withAmx_history[1]
plt.figure()
plt.title("Resnet50 Inference Accuracy")
plt.xlabel("Test Case")
plt.ylabel("Inference Accuracy")
plt.bar(["FP32", "BF16 with AVX512", "BF16 with AMX"], [fp32_inference_accuracy, bf16_noAmx_inference_accuracy, bf16_withAmx_inference_accuracy])

print('[CODE_SAMPLE_COMPLETED_SUCCESSFULLY]')
@@ -0,0 +1,7 @@
Copyright Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,136 @@
# Enabling Auto-Mixed Precision for Transfer Learning with TensorFlow
This tutorial guides you through enabling auto-mixed precision to use low-precision data types, like bfloat16, for model inference with TensorFlow* (TF).

The Intel® Optimization for TensorFlow* extends TensorFlow* with optimizations for an extra performance boost on Intel® hardware. For example, newer optimizations include AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX).
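To check whether your CPU exposes these instruction sets, you can inspect the CPU flags; the sample script does this with the **py-cpuinfo** package, as in this minimal sketch:

```
from cpuinfo import get_cpu_info

# List any AMX- or VNNI-related CPU flags reported by the system
flags = get_cpu_info()['flags']
print([f for f in flags if 'amx' in f or 'vnni' in f])
```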

| Area | Description
|:--- |:---
| What you will learn | Inference performance improvements with Intel® AMX BF16
| Time to complete | 10 minutes
| Category | Code Optimization


## Purpose

The Intel® Optimization for TensorFlow* gives users the ability to speed up inference on Intel® Xeon® Scalable processors with lower-precision data formats and specialized instruction sets. The bfloat16 (BF16) data format uses half the bit width of 32-bit floating point (FP32), which halves the memory footprint and reduces execution time. You should notice a performance improvement with the Intel® AMX instruction set compared to AVX-512.
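As a quick illustration (not part of the sample), a tensor cast to bfloat16 in TensorFlow occupies half the bytes per element of its float32 counterpart:

```
import tensorflow as tf

x_fp32 = tf.random.uniform([1024, 1024], dtype=tf.float32)
x_bf16 = tf.cast(x_fp32, tf.bfloat16)

# Bytes per element: float32 uses 4, bfloat16 uses 2
print(x_fp32.dtype.size, x_bf16.dtype.size)  # 4 2
```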

## Prerequisites

| Optimized for | Description
|:--- |:---
| OS | Ubuntu* 18.04 or newer
| Hardware | Intel® Xeon® Scalable processor family or newer
| Software | Intel® AI Analytics Toolkit (AI Kit)

### For Local Development Environments

You will need to download and install the following toolkits, tools, and components to use the sample.

- **Create a virtual environment venv-tf using Python 3.8**

```
pip install virtualenv
# Use `whereis python` to find the `python3.8` path on your system and specify it below. Install Python 3.8 first if it is not on your system.
virtualenv -p /usr/bin/python3.8 venv-tf
source venv-tf/bin/activate

# If git, numactl, and wget are not installed, install them with:
yum update -y && yum install -y git numactl wget
```

- **Install [Intel optimized TensorFlow](https://pypi.org/project/intel-tensorflow/2.11.dev202242/)**
```
# Install Intel Optimized TensorFlow
pip install intel-tensorflow==2.11.dev202242
pip install keras-nightly==2.11.0.dev2022092907
```

- **Jupyter Notebook**

Install using PIP: `pip install notebook`. <br> Alternatively, see [*Installing Jupyter*](https://jupyter.org/install) for detailed installation instructions.

- **Other dependencies**

You will need to install the additional packages in requirements.txt and **Py-cpuinfo**.
```
pip install -r requirements.txt
python -m pip install py-cpuinfo
```

### For Intel® DevCloud

The necessary tools and components are already installed in the environment. You do not need to install additional components. See [Intel® DevCloud for oneAPI](https://devcloud.intel.com/oneapi/get_started/) for information.

## Key Implementation Details

This code sample uses a ResNet50v1.5 model pretrained on the ImageNet dataset and fine-tuned on the TensorFlow Flower dataset. The FP32 model is validated using FP32 and BF16 precision, including the use of Intel® Advanced Matrix Extensions (Intel® AMX) on BF16. Intel® AMX supports the BF16 and INT8 data types starting with 4th Generation Intel® Xeon® Scalable processors. The inference times are compared, showcasing the speedup of BF16 and Intel® AMX; the enabling pattern is excerpted below.
The sample tutorial contains one Jupyter Notebook and one Python script. You can use either.
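The key enabling pattern, excerpted from the sample script, is to cap the oneDNN instruction set through an environment variable, turn on the oneDNN bfloat16 auto-mixed-precision graph optimization, and then reload the FP32 SavedModel so the optimization is applied:

```
import os
import tensorflow as tf

# Allow oneDNN to dispatch up to AMX BF16 instructions
os.environ["ONEDNN_MAX_CPU_ISA"] = "AMX_BF16"
# Rewrite eligible FP32 ops to BF16 during graph optimization
tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16': True})

bf16_model = tf.keras.models.load_model('models/my_saved_model_fp32')
```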

### Jupyter Notebook

| Notebook | Description
|:--- |:---
|`IntelTensorFlow_AMX_BF16_Inference.ipynb` | TensorFlow Inference Optimizations with Advanced Matrix Extensions Bfloat16

### Python Scripts

| Script | Description
|:--- |:---
|`IntelTensorFlow_AMX_BF16_Inference.py` | The script performs inference with AMX BF16 and compares the performance against the baseline


## Run the Sample on Linux*
1. Launch Jupyter Notebook.
```
jupyter notebook --ip=0.0.0.0
```
2. Follow the instructions to open the URL with the token in your browser.
3. Locate and select the Notebook.
```
IntelTensorFlow_AMX_BF16_Inference.ipynb
```
4. Change your Jupyter Notebook kernel to **tensorflow** or **intel-tensorflow**.
5. Run every cell in the Notebook in sequence.


### Run the Sample on Intel® DevCloud

1. If you do not already have an account, request an Intel® DevCloud account at [*Create an Intel® DevCloud Account*](https://intelsoftwaresites.secure.force.com/DevCloud/oneapi).
2. On a Linux* system, open a terminal.
3. SSH into Intel® DevCloud.
```
ssh DevCloud
```
> **Note**: You can find information about configuring your Linux system and connecting to Intel DevCloud at Intel® DevCloud for oneAPI [Get Started](https://devcloud.intel.com/oneapi/get_started).
4. Launch Jupyter Notebook.
```
jupyter notebook --ip=0.0.0.0
```
5. Follow the instructions to open the URL with the token in your browser.
6. Locate and select the Notebook.
```
IntelTensorFlow_AMX_BF16_Inference.ipynb
```
7. Change the kernel to **tensorflow** or **intel-tensorflow**.
8. Run every cell in the Notebook in sequence.


#### Troubleshooting

If you receive an error message, troubleshoot the problem using the **Diagnostics Utility for Intel® oneAPI Toolkits**. The diagnostic utility provides configuration and system checks to help find missing dependencies, permissions errors, and other issues. See the [Diagnostics Utility for Intel® oneAPI Toolkits User Guide](https://www.intel.com/content/www/us/en/develop/documentation/diagnostic-utility-user-guide/top.html) for more information on using the utility.


## Example Output
If successful, the sample displays [CODE_SAMPLE_COMPLETED_SUCCESSFULLY]. Additionally, the sample generates performance and analysis diagrams for comparison.

The following image shows approximate performance speedups using AMX BF16 with auto-mixed precision during inference. To see a larger improvement from AVX-512 BF16 to AMX BF16, increase the amount of computation per batch.
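For example, you can raise the batch size constant in the sample script to increase the per-batch work (assuming enough memory is available):

```
# Larger batches increase per-batch compute, amplifying the AMX speedup
batch_size = 1024  # the sample's default is 512
```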

![Inference Speedup](images/inference-perf-comp.png)

In addition to improving model inference speed, using AMX BF16 with auto-mixed precision during inference does not influence the inference accuracy.

![Inference Accuracy](images/inference-acc-comp.png)


## License

Code samples are licensed under the MIT license. See
[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.

Third party program Licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
@@ -0,0 +1,26 @@
import os

def runJupyterNotebook(input_notebook_filename, output_notebook_filename, conda_env, fdpath='./'):
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    from nbconvert.preprocessors import CellExecutionError
    if os.path.isfile(input_notebook_filename) is False:
        print("No Jupyter notebook found : ", input_notebook_filename)
        return -1
    try:
        with open(input_notebook_filename) as f:
            nb = nbformat.read(f, as_version=4)
        # Execute the notebook with the given kernel and write the executed copy out
        ep = ExecutePreprocessor(timeout=6000, kernel_name=conda_env, allow_errors=True)
        ep.preprocess(nb, {'metadata': {'path': fdpath}})
        with open(output_notebook_filename, 'w', encoding='utf-8') as f:
            nbformat.write(nb, f)
        return 0
    except CellExecutionError:
        print("Exception!")
        return -1


runJupyterNotebook(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'IntelTensorFlow_AMX_BF16_Inference.ipynb'),
                   'IntelTensorFlow_AMX_BF16_Inference_result.ipynb',
                   'tensorflow')
Binary model files (pretrained FP32 SavedModel weights and serialized Keras metadata) are not shown.
@@ -0,0 +1,5 @@
notebook
Pillow
tensorflow_hub
requests
