diff --git a/hls4ml/converters/__init__.py b/hls4ml/converters/__init__.py
index e38e82ac1..203e31a66 100644
--- a/hls4ml/converters/__init__.py
+++ b/hls4ml/converters/__init__.py
@@ -121,8 +121,16 @@ def convert_from_config(config):
         model = onnx_to_hls(yamlConfig)
     elif 'PytorchModel' in yamlConfig:
         model = pytorch_to_hls(yamlConfig)
-    else:
-        model = keras_v2_to_hls(yamlConfig)
+    elif 'KerasModel' in yamlConfig:
+        import keras
+
+        if keras.__version__ >= '3.0':
+            # Get fallback flags from config or use defaults
+            allow_da_fallback = yamlConfig.get('HLSConfig', {}).get('Model', {}).get('AllowDAFallback', True)
+            allow_v2_fallback = yamlConfig.get('HLSConfig', {}).get('Model', {}).get('AllowV2Fallback', True)
+            model = keras_v3_to_hls(yamlConfig, allow_da_fallback, allow_v2_fallback)
+        else:
+            model = keras_v2_to_hls(yamlConfig)

     return model

diff --git a/hls4ml/model/profiling.py b/hls4ml/model/profiling.py
index e122e3f46..81052a589 100644
--- a/hls4ml/model/profiling.py
+++ b/hls4ml/model/profiling.py
@@ -27,7 +27,7 @@
     __torch_profiling_enabled__ = False

 try:
-    import qkeras
+    from qkeras import QActivation

     __qkeras_profiling_enabled__ = True
 except ImportError:
@@ -37,7 +37,7 @@
 if __keras_profiling_enabled__:
     __keras_activations.append(keras.layers.Activation)
 if __qkeras_profiling_enabled__:
-    __keras_activations.append(qkeras.QActivation)
+    __keras_activations.append(QActivation)


 def get_unoptimized_hlsmodel(model):
diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index fccf83de6..684abc051 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -28,7 +28,14 @@
 # Long-running tests will not be bundled with other tests
 LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras', 'test_pytorch_api'}

-KERAS3_LIST = {'test_keras_v3_api', 'test_hgq2_mha', 'test_einsum_dense', 'test_qeinsum', 'test_multiout_onnx'}
+KERAS3_LIST = {
+    'test_keras_v3_api',
+    'test_hgq2_mha',
+    'test_einsum_dense',
+    'test_qeinsum',
+    'test_multiout_onnx',
+    'test_keras_v3_profiling',
+}

 # Test files to split by individual test cases
 # Value = chunk size per CI job
diff --git a/test/pytest/test_keras_v3_profiling.py b/test/pytest/test_keras_v3_profiling.py
new file mode 100644
index 000000000..f98e69963
--- /dev/null
+++ b/test/pytest/test_keras_v3_profiling.py
@@ -0,0 +1,141 @@
+"""Test numerical profiling with Keras v3 models."""
+
+import numpy as np
+import pytest
+
+try:
+    import keras
+
+    __keras_profiling_enabled__ = keras.__version__ >= '3.0'
+except ImportError:
+    __keras_profiling_enabled__ = False
+
+if __keras_profiling_enabled__:
+    from hls4ml.model.profiling import numerical
+
+
+def count_bars_in_figure(fig):
+    """Count the number of bars in all axes of a figure."""
+    count = 0
+    for ax in fig.get_axes():
+        count += len(ax.patches)
+    return count
+
+
+@pytest.mark.skipif(not __keras_profiling_enabled__, reason='Keras 3.0 or higher is required')
+def test_keras_v3_numerical_profiling_simple_model():
+    """Test numerical profiling with a simple Keras v3 Dense model."""
+    model = keras.Sequential(
+        [
+            keras.layers.Dense(20, input_shape=(10,), activation='relu'),
+            keras.layers.Dense(5, activation='softmax'),
+        ]
+    )
+    model.compile(optimizer='adam', loss='categorical_crossentropy')
+    # Build the model so weights are initialized
+    model.build((None, 10))
+
+    # Test profiling weights only
+    wp, _, _, _ = numerical(model)
+    assert wp is not None
+    # Should have 2 bars (one per layer, each showing weights and biases combined)
+    assert count_bars_in_figure(wp) == 2
+
+
+@pytest.mark.skipif(not __keras_profiling_enabled__, reason='Keras 3.0 or higher is required')
+def test_keras_v3_numerical_profiling_with_activations():
+    """Test numerical profiling with Keras v3 model including activations."""
+    # Use functional API instead of Sequential to ensure input layer is properly defined
+    inputs = keras.Input(shape=(10,))
+    x = keras.layers.Dense(20, activation='relu')(inputs)
+    outputs = keras.layers.Dense(5)(x)
+    model = keras.Model(inputs=inputs, outputs=outputs)
+    model.compile(optimizer='adam', loss='mse')
+
+    # Generate test data
+    X_test = np.random.rand(100, 10).astype(np.float32)
+
+    # Test profiling with activations
+    wp, _, ap, _ = numerical(model, X=X_test)
+    assert wp is not None
+    assert ap is not None
+
+
+@pytest.mark.skipif(not __keras_profiling_enabled__, reason='Keras 3.0 or higher is required')
+def test_keras_v3_numerical_profiling_conv_model():
+    """Test numerical profiling with a Keras v3 Conv model."""
+    model = keras.Sequential(
+        [
+            keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)),
+            keras.layers.MaxPooling2D((2, 2)),
+            keras.layers.Flatten(),
+            keras.layers.Dense(10, activation='softmax'),
+        ]
+    )
+    model.compile(optimizer='adam', loss='categorical_crossentropy')
+    # Build the model so weights are initialized
+    model.build((None, 28, 28, 1))
+
+    # Test profiling weights
+    wp, _, _, _ = numerical(model)
+    assert wp is not None
+    # Conv layer has 1 bar, Dense layer has 1 bar = 2 bars total
+    assert count_bars_in_figure(wp) == 2
+
+
+@pytest.mark.skipif(not __keras_profiling_enabled__, reason='Keras 3.0 or higher is required')
+@pytest.mark.skip(reason='convert_from_config needs update for Keras v3 model serialization format')
+def test_keras_v3_numerical_profiling_with_hls_model():
+    """Test numerical profiling with both Keras v3 model and hls4ml model."""
+    import hls4ml
+
+    # Use functional API to ensure input layer is properly defined
+    inputs = keras.Input(shape=(8,))
+    x = keras.layers.Dense(16, activation='relu')(inputs)
+    outputs = keras.layers.Dense(4, activation='softmax')(x)
+    model = keras.Model(inputs=inputs, outputs=outputs)
+    model.compile(optimizer='adam', loss='categorical_crossentropy')
+
+    # Generate test data
+    X_test = np.random.rand(100, 8).astype(np.float32)
+
+    # Create hls4ml model
+    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
+    hls_model = hls4ml.converters.convert_from_keras_model(
+        model,
+        hls_config=config,
+        output_dir='/tmp/test_keras_v3_profiling_hls',
+        backend='Vivado',
+        allow_da_fallback=True,
+        allow_v2_fallback=True,
+    )
+
+    # Test profiling with both models
+    wp, wph, ap, aph = numerical(model, hls_model=hls_model, X=X_test)
+
+    assert wp is not None  # Keras model weights (before optimization)
+    assert wph is not None  # HLS model weights (after optimization)
+    assert ap is not None  # Keras model activations (before optimization)
+    assert aph is not None  # HLS model activations (after optimization)
+
+
+@pytest.mark.skipif(not __keras_profiling_enabled__, reason='Keras 3.0 or higher is required')
+def test_keras_v3_numerical_profiling_batch_norm():
+    """Test numerical profiling with Keras v3 model containing BatchNormalization."""
+    model = keras.Sequential(
+        [
+            keras.layers.Dense(20, input_shape=(10,)),
+            keras.layers.BatchNormalization(),
+            keras.layers.Activation('relu'),
+            keras.layers.Dense(5, activation='softmax'),
+        ]
+    )
+    model.compile(optimizer='adam', loss='categorical_crossentropy')
+    # Build the model so weights are initialized
+    model.build((None, 10))
+
+    # Test profiling weights
+    wp, _, _, _ = numerical(model)
+    assert wp is not None
+    # Dense has 1 bar, BatchNorm has 1 bar, second Dense has 1 bar = 3 bars
+    assert count_bars_in_figure(wp) == 3