From 0e8462e7e3e2e69e03a964fa4210a89eeafd4dd7 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Mon, 5 Jun 2023 23:51:11 +0200
Subject: [PATCH 01/23] DOC: bump version in snippets, 4.0.0 --> 4.1.0

---
 README.md         |  4 ++--
 doc/docker_use.md | 28 +++++++++++++++++-----------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index f4eac4f2..ec51714c 100644
--- a/README.md
+++ b/README.md
@@ -33,8 +33,8 @@ The documentation is available on [otbtf.readthedocs.io](https://otbtf.readthedo
 You can use our latest GPU enabled docker images.
 
 ```bash
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.0.0-gpu otbcli_PatchesExtraction
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.0.0-gpu python -c "import otbtf"
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu otbcli_PatchesExtraction
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu python -c "import otbtf"
 ```
 
 You can also build OTBTF from sources (see the documentation)
diff --git a/doc/docker_use.md b/doc/docker_use.md
index ebbab5a4..0c343fef 100644
--- a/doc/docker_use.md
+++ b/doc/docker_use.md
@@ -5,13 +5,13 @@ We recommend to use OTBTF from official docker images.
 Latest CPU-only docker image:
 
 ```commandline
-docker pull mdl4eo/otbtf:4.0.0-cpu
+docker pull mdl4eo/otbtf:4.1.0-cpu
 ```
 
 Latest GPU-ready docker image:
 
 ```commandline
-docker pull mdl4eo/otbtf:4.0.0-gpu
+docker pull mdl4eo/otbtf:4.1.0-gpu
 ```
 
 Read more in the following sections.
@@ -25,12 +25,12 @@ Since OTBTF >= 3.2.1 you can find the latest docker images on
 
 | Name                                                                               | Os            | TF    | OTB   | Description            | Dev files | Compute capability |
 |------------------------------------------------------------------------------------| ------------- |-------|-------| ---------------------- | --------- | ------------------ |
-| **mdl4eo/otbtf:4.0.0-cpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.0.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.0.0-gpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.0.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
-| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt**     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
-| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-cpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-gpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt**     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
 
 The list of older releases is available [here](#older-images).
 
@@ -51,13 +51,13 @@ You can then use the OTBTF `gpu` tagged docker images with the **NVIDIA runtime*
 With Docker version earlier than 19.03 :
 
 ```bash
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.0.0-gpu bash
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu bash
 ```
 
 With Docker version including and after 19.03 :
 
 ```bash
-docker run --gpus all -ti mdl4eo/otbtf:4.0.0-gpu bash
+docker run --gpus all -ti mdl4eo/otbtf:4.1.0-gpu bash
 ```
 
 You can find some details on the **GPU docker image** and some **docker tips 
@@ -80,7 +80,7 @@ See here how to install docker on Ubuntu
 1. Install [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install-win10#manual-installation-steps) (Windows Subsystem for Linux)
 2. Install [docker desktop](https://www.docker.com/products/docker-desktop)
 3. Start **docker desktop** and **enable WSL2** from *Settings* > *General* then tick the box *Use the WSL2 based engine*
-3. Open a **cmd.exe** or **PowerShell** terminal, and type `docker create --name otbtf-cpu --interactive --tty mdl4eo/otbtf:4.0.0-cpu`
+3. Open a **cmd.exe** or **PowerShell** terminal, and type `docker create --name otbtf-cpu --interactive --tty mdl4eo/otbtf:4.1.0-cpu`
 4. Open **docker desktop**, and check that the docker is running in the **Container/Apps** menu
 ![Docker desktop, after the docker image is downloaded and ready to use](images/docker_desktop_1.jpeg)
 5. From **docker desktop**, click on the icon highlighted as shown below, and use the bash terminal that should pop up!
@@ -160,4 +160,10 @@ Here you can find the list of older releases of OTBTF:
 | **mdl4eo/otbtf:3.4.0-gpu-dev**                                                     | Ubuntu Focal  | r2.8   | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
 | **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:3.4.0-gpu-opt**     | Ubuntu Focal  | r2.8   | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
 | **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:3.4.0-gpu-opt-dev** | Ubuntu Focal  | r2.8   | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.0.0-cpu**                                                         | Ubuntu Jammy  | r2.12  | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.0.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12  | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.0.0-gpu**                                                         | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.0.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt**     | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
 
-- 
GitLab


From 76dca56a3ae57e991bce509e10da8a712b4609fb Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Mon, 14 Aug 2023 13:10:17 +0200
Subject: [PATCH 02/23] Fix cropping value in documentation

---
 doc/api_tutorial.md | 72 +++++++++++++++++++++++++--------------------
 1 file changed, 40 insertions(+), 32 deletions(-)

diff --git a/doc/api_tutorial.md b/doc/api_tutorial.md
index bcd6ea45..8c95190c 100644
--- a/doc/api_tutorial.md
+++ b/doc/api_tutorial.md
@@ -386,21 +386,28 @@ $$
 Let's consider a chunk of input image of size 128, and check the valid output 
 size of our model:
 
-| Conv. name | Conv. type        | Kernel | Stride | Out. size | Valid out. size |
-|------------|-------------------|--------|--------|-----------|-----------------|
-| *conv1*    | Conv2D            | 3      | 2      | 64        | 63              |
-| *conv2*    | Conv2D            | 3      | 2      | 32        | 30              |
-| *conv3*    | Conv2D            | 3      | 2      | 16        | 14              |
-| *conv4*    | Conv2D            | 3      | 2      | 8         | 6               |
-| *tconv1*   | Transposed Conv2D | 3      | 2      | 16        | 10              |
-| *tconv2*   | Transposed Conv2D | 3      | 2      | 32        | 18              |
-| *tconv3*   | Transposed Conv2D | 3      | 2      | 64        | 34              |
+| Conv. name     | Conv. type        | Kernel | Stride | Out. size | Valid out. size |
+|----------------|-------------------|--------|--------|-----------|-----------------|
+| *input*        | /                 | /      | /      | 128       | 128             |
+| *conv1*        | Conv2D            | 3      | 2      | 64        | 63              |
+| *conv2*        | Conv2D            | 3      | 2      | 32        | 30              |
+| *conv3*        | Conv2D            | 3      | 2      | 16        | 14              |
+| *conv4*        | Conv2D            | 3      | 2      | 8         | 6               |
+| *tconv1*       | Transposed Conv2D | 3      | 2      | 16        | 10              |
+| *tconv2*       | Transposed Conv2D | 3      | 2      | 32        | 18              |
+| *tconv3*       | Transposed Conv2D | 3      | 2      | 64        | 34              |
+| *classifier*   | Transposed Conv2D | 3      | 2      | 128       | 66              |
 
 This shows that our model can be applied in a fully convolutional fashion 
 without generating blocking artifacts, using the central part of the output of 
-size 34. This is equivalent to remove \((128 - 24)/2 = 47\) pixels from 
-the borders of the output. We can hence use the output cropped with **64** 
-pixels, named ***predictions_crop64***.
+size 66. This is equivalent to removing \((128 - 66)/2 = 31\) pixels from 
+the borders of the output. We round up to the nearest power of 2 to keep the 
+convolutions consistent between two adjacent image chunks, so we can remove 32 
+pixels from the borders. We can hence use the output cropped with **32** pixels, 
+named ***predictions_crop32*** in the model outputs.
+By default, cropped outputs in `otbtf.ModelBase` are generated for the following 
+values: `[16, 32, 64, 96, 128]`, but that can be changed by setting `inference_cropping` 
+in the model `__init__()` (see the reference API documentation for details).
 
 !!! Info
 
@@ -427,10 +434,11 @@ In the following subsections, we run `TensorflowModelServe` over the input
 image, with the following parameters:
 
 - the input name is ***input_xs***
-- the output name is ***predictions_crop64*** (cropping margin of 64 pixels)
-- we choose a receptive field of ***256*** and an expression field of 
-***128*** so that they match the cropping margin of 64 pixels. 
-
+- the output name is ***predictions_crop32*** (cropping margin of 32 pixels)
+- we choose a receptive field of ***128*** and an expression field of 
+***64*** so that they match the cropping margin of 32 pixels (since we remove 
+32 pixels from each side in x and y dimensions, we remove a total of 64 pixels 
+in each of the x/y dimensions). 
 
 ### Command Line Interface
 
@@ -439,14 +447,14 @@ Open a terminal and run the following command:
 ```commandline
 otbcli_TensorflowModelServe \
 -source1.il $DATADIR/fake_spot6.jp2 \
--source1.rfieldx 256 \ 
--source1.rfieldy 256 \
+-source1.rfieldx 128 \ 
+-source1.rfieldy 128 \
 -source1.placeholder "input_xs" \
 -model.dir /tmp/my_1st_savedmodel \
 -model.fullyconv on \
--output.names "predictions_crop64" \
--output.efieldx 128 \
--output.efieldy 128 \
+-output.names "predictions_crop32" \
+-output.efieldx 64 \
+-output.efieldy 64 \
 -out softmax.tif
 ```
 
@@ -459,14 +467,14 @@ python wrapper:
 import otbApplication
 app = otbApplication.Registry.CreateApplication("TensorflowModelServe")
 app.SetParameterStringList("source1.il", ["fake_spot6.jp2"])
-app.SetParameterInt("source1.rfieldx", 256)
-app.SetParameterInt("source1.rfieldy", 256)
+app.SetParameterInt("source1.rfieldx", 128)
+app.SetParameterInt("source1.rfieldy", 128)
 app.SetParameterString("source1.placeholder", "input_xs")
 app.SetParameterString("model.dir", "/tmp/my_1st_savedmodel")
 app.EnableParameter("fullyconv")
-app.SetParameterStringList("output.names", ["predictions_crop64"])
-app.SetParameterInt("output.efieldx", 128)
-app.SetParameterInt("output.efieldy", 128)
+app.SetParameterStringList("output.names", ["predictions_crop32"])
+app.SetParameterInt("output.efieldx", 64)
+app.SetParameterInt("output.efieldy", 64)
 app.SetParameterString("out", "softmax.tif")
 app.ExecuteAndWriteOutput()
 ```
@@ -479,14 +487,14 @@ Using PyOTB is nicer:
 import pyotb
 pyotb.TensorflowModelServe({
     "source1.il": "fake_spot6.jp2",
-    "source1.rfieldx": 256,
-    "source1.rfieldy": 256,
+    "source1.rfieldx": 128,
+    "source1.rfieldy": 128,
     "source1.placeholder": "input_xs",
     "model.dir": "/tmp/my_1st_savedmodel",
     "fullyconv": True,
-    "output.names": ["predictions_crop64"],
-    "output.efieldx": 128,
-    "output.efieldy": 128,
+    "output.names": ["predictions_crop32"],
+    "output.efieldx": 64,
+    "output.efieldy": 64,
     "out": "softmax.tif",
 })
 ```
@@ -499,4 +507,4 @@ pyotb.TensorflowModelServe({
     control the output image chunk size and tiling/stripping layout. Combined 
     with the `optim` parameters, you will likely always find the best settings 
     suited for the hardware. Also, the receptive and expression fields sizes 
-    have a major contribution.
\ No newline at end of file
+    have a major contribution.
-- 
GitLab


From 3439719fb2d4275627990e6e5ed440b60a5f0679 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:21:19 +0200
Subject: [PATCH 03/23] COMP: fix #45

---
 Dockerfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 711dc4ba..22f9bb19 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -27,7 +27,9 @@ RUN ln -s /usr/bin/python3 /usr/local/bin/python && ln -s /usr/bin/pip3 /usr/loc
 RUN pip install --no-cache-dir pip --upgrade
 # NumPy version is conflicting with system's gdal dep and may require venv
 ARG NUMPY_SPEC="==1.22.*"
-RUN pip install --no-cache-dir -U wheel mock six future tqdm deprecated "numpy$NUMPY_SPEC" packaging requests \
+# This is to avoid https://github.com/tensorflow/tensorflow/issues/61551
+ARG PROTO_SPEC="==4.23.*"
+RUN pip install --no-cache-dir -U wheel mock six future tqdm deprecated "numpy$NUMPY_SPEC" "protobuf$PROTO_SPEC" packaging requests \
  && pip install --no-cache-dir --no-deps keras_applications keras_preprocessing
 
 # ----------------------------------------------------------------------------
-- 
GitLab
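
As a side note (not part of the patch), the effect of the pin can be checked inside a built image with a quick, hedged sanity check; the `4.23.*` value simply mirrors the `PROTO_SPEC` build argument above.

```python
# Hedged sanity check, assumed to run inside the built OTBTF image:
# confirm the installed protobuf matches PROTO_SPEC ("==4.23.*").
import google.protobuf

print(google.protobuf.__version__)
assert google.protobuf.__version__.startswith("4.23.")
```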


From 31d94342fc658f7d13b8d559b4252ac6d2766745 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:21:51 +0200
Subject: [PATCH 04/23] DOC: tensor typing

---
 otbtf/model.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/otbtf/model.py b/otbtf/model.py
index b3ee7b92..9958510b 100644
--- a/otbtf/model.py
+++ b/otbtf/model.py
@@ -28,7 +28,8 @@ import abc
 import logging
 import tensorflow as tf
 
-TensorsDict = Dict[str, Any]
+Tensor = Any
+TensorsDict = Dict[str, Tensor]
 
 
 class ModelBase(abc.ABC):
-- 
GitLab
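
A hedged illustration of how the refined aliases read in user code; the function name and scaling constant are assumptions, loosely following the OTBTF FCNN example.

```python
# Sketch only: the aliases document intent in signatures without pinning
# a concrete TensorFlow type.
from typing import Any, Dict

import tensorflow as tf

Tensor = Any
TensorsDict = Dict[str, Tensor]


def normalize_inputs(inputs: TensorsDict) -> TensorsDict:
    # Assumed 16-bit imagery rescaled to roughly [0, 1]
    return {"input_xs": tf.cast(inputs["input_xs"], tf.float32) * 0.0001}
```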


From 761c0a56d317396a3b76f9cffe8e1d08f706f9b4 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:22:47 +0200
Subject: [PATCH 05/23] ADD: keras layers for argmax, max, binary dilated mask

---
 otbtf/layers.py | 190 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 190 insertions(+)
 create mode 100644 otbtf/layers.py

diff --git a/otbtf/layers.py b/otbtf/layers.py
new file mode 100644
index 00000000..6da80bd7
--- /dev/null
+++ b/otbtf/layers.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# ==========================================================================
+#
+#   Copyright 2018-2019 IRSTEA
+#   Copyright 2020-2023 INRAE
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#          http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# ==========================================================================*/
+"""
+[Source code :fontawesome-brands-github:](https://github.com/remicres/otbtf/
+tree/master/otbtf/ops.py){ .md-button }
+
+This module provides some useful keras layers to build deep nets.
+"""
+from typing import List, Tuple, Any
+import tensorflow as tf
+
+
+Tensor = Any
+Scalars = List[float] | Tuple[Float]
+
+
+class DilatedMask(keras.layers.Layer):
+    """Layer to dilate a binary mask."""
+    def __init__(self, nodata_value: float, radius: int, name: str = None):
+        """
+        Params:
+            nodata_value: the no-data value of the binary mask
+            radius: dilatation radius
+            name: layer name
+
+        """
+        self.nodata_value = nodata_value
+        self.radius = radius
+        super().__init__(name=name)
+
+    def call(self, inp: Tensor):
+        """
+        Params:
+            inp: input layer
+
+        """
+        # Compute a binary mask from the input
+        nodata_mask = tf.cast(tf.math.equal(inp, self.nodata_value), tf.uint8)
+
+        se_size = 1 + 2 * self.radius
+        # Create a morphological kernel suitable for binary dilatation, see 
+        # https://stackoverflow.com/q/54686895/13711499
+        kernel = tf.zeros((se_size, se_size, 1), dtype=tf.uint8)
+        conv2d_out = tf.nn.dilation2d(
+            input=nodata_mask,
+            filters=kernel,
+            strides=[1, 1, 1, 1],
+            padding="SAME",
+            data_format="NHWC",
+            dilations=[1, 1, 1, 1],
+            name="dilatation_conv2d"
+        )
+        return tf.cast(conv2d_out, tf.uint8)
+
+
+class ApplyMask(keras.layers.Layer):
+    """Layer to apply a binary mask to one input."""
+    def __init__(self, out_nodata: float, name: str = None):
+        """
+        Params:
+            out_nodata: output no-data value, set when the mask is 1
+            name: layer name
+
+        """
+        super().__init__(name=name)
+        self.out_nodata = out_nodata
+
+    def call(self, inputs: Tuple[Tensor] | List[Tensor]):
+        """
+        Params:
+            inputs: (mask, input). list or tuple of size 2. First element is
+                the binary mask, second element is the input. In the binary
+                mask, values at 1 indicate where to replace input values with
+                no-data.
+
+        """
+        mask, inp = inputs
+        return tf.where(mask == 1, float(self.out_nodata), inp)
+
+
+class ScalarsTile(keras.layers.Layer):
+    """
+    Layer to duplicate some scalars in a whole array.
+    Simple example with only one scalar = 0.152:
+        output [[0.152, 0.152, 0.152],
+                [0.152, 0.152, 0.152],
+                [0.152, 0.152, 0.152]]
+
+    """
+    def __init__(self, name: str = None):
+        """
+        Params:
+            name: layer name
+
+        """
+        super().__init__(name=name)
+
+    def call(self, inputs: List[Tensor | Scalars] | Tuple[Tensor | Scalars]):
+        """
+        Params:
+            inputs: [reference, scalar inputs]. Reference is the tensor whose
+                shape has to be matched, is expected to be of shape [x, y, n].
+                scalar inputs are expected to be of shape [1] or [n] so that
+                they fill the last dimension of the output.
+
+        """
+        ref, scalar_inputs = inputs
+        inp = tf.stack(scalar_inputs, axis=-1)
+        inp = tf.expand_dims(tf.expand_dims(inp, axis=1), axis=1)
+        return tf.tile(inp, [1, tf.shape(ref)[1], tf.shape(ref)[2], 1])
+
+
+class Argmax(keras.layers.Layer):
+    """
+    Layer to compute the argmax of a tensor.
+
+    For example, for a vector A=[0.1, 0.3, 0.6], the output is 2 because
+    A[2] is the max.
+    Useful to transform a softmax into a "categorical" map for instance.
+
+    """
+    def __init__(self, name: str = None):
+        """
+        Params:
+            name: layer name
+
+        """
+        super().__init__(name=name)
+
+    def call(self, inputs):
+        """
+        Params:
+            inputs: softmax tensor, or any tensor with last dimension of
+                size nb_classes
+
+        Returns:
+            Index of the maximum value, in the last dimension. Int32.
+            The output tensor has same shape length as input, but with last
+            dimension of size 1. Contains integer values ranging from 0 to
+            (nb_classes - 1).
+
+        """
+        return tf.expand_dims(tf.math.argmax(inputs, axis=-1), axis=-1)
+
+
+class Max(keras.layers.Layer):
+    """
+    Layer to compute the max of a tensor.
+
+    For example, for a vector [0.1, 0.3, 0.6], the output is 0.6
+    Useful to transform a softmax into a "confidence" map for instance
+
+    """
+    def __init__(self, name=None):
+        """
+        Params:
+            name: layer name
+
+        """
+        super().__init__(name=name)
+
+    def call(self, inputs):
+        """
+        Params:
+            inputs: softmax tensor
+
+        Returns:
+            Maximum value along the last axis of the input.
+            The output tensor has same shape length as input, but with last
+            dimension of size 1.
+
+        """
+        return tf.expand_dims(tf.math.reduce_max(inputs, axis=-1), axis=-1)
-- 
GitLab


From 6a46c034a21fdab2f258e971ee5a1da7cafb56cb Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:23:08 +0200
Subject: [PATCH 06/23] ADD: easy one-hot encoding

---
 otbtf/ops.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 otbtf/ops.py

diff --git a/otbtf/ops.py b/otbtf/ops.py
new file mode 100644
index 00000000..3c65b4b7
--- /dev/null
+++ b/otbtf/ops.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# ==========================================================================
+#
+#   Copyright 2018-2019 IRSTEA
+#   Copyright 2020-2023 INRAE
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#          http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# ==========================================================================*/
+"""
+[Source code :fontawesome-brands-github:](https://github.com/remicres/otbtf/
+tree/master/otbtf/ops.py){ .md-button }
+
+This module provides some useful Tensorflow and keras operators to build
+and train deep nets.
+"""
+from typing import List, Tuple, Any
+import tensorflow as tf
+
+
+Tensor = Any
+Scalars = List[float] | Tuple[Float]
+def one_hot(labels: Tensor, nb_classes: int):
+    """
+    Converts label values into one-hot vectors.
+
+    Params:
+        labels: tensor of label values (shape [x, y, 1])
+        nb_classes: number of classes
+
+    Returns:
+        one-hot encoded vector (shape [x, y, nb_classes])
+
+    """
+    labels_xy = tf.squeeze(tf.cast(labels, tf.int32), axis=-1)  # shape [x, y]
+    return tf.one_hot(labels_xy, depth=nb_classes)  # shape [x, y, nb_classes]
\ No newline at end of file
-- 
GitLab


From b6e3d2b60a9d40c385f87b8e7ac606f58e1cfa5f Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:28:55 +0200
Subject: [PATCH 07/23] ADD: easy one-hot encoding

---
 otbtf/ops.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/otbtf/ops.py b/otbtf/ops.py
index 3c65b4b7..4a8d0b96 100644
--- a/otbtf/ops.py
+++ b/otbtf/ops.py
@@ -29,7 +29,7 @@ import tensorflow as tf
 
 
 Tensor = Any
-Scalars = List[float] | Tuple[Float]
+Scalars = List[float] | Tuple[float]
 def one_hot(labels: Tensor, nb_classes: int):
     """
     Converts label values into one-hot vectors.
-- 
GitLab
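
With the annotation fixed, the helper can be exercised on a toy tensor; the shapes follow the docstring, while the label values and class count below are arbitrary assumptions (an environment where `otbtf` imports is assumed).

```python
# Toy check of otbtf.ops.one_hot: labels of shape [x, y, 1] become
# one-hot vectors of shape [x, y, nb_classes].
import tensorflow as tf
from otbtf.ops import one_hot

labels = tf.constant([[[0], [1]], [[2], [1]]], dtype=tf.int32)  # [2, 2, 1]
encoded = one_hot(labels=labels, nb_classes=3)                  # [2, 2, 3]
print(encoded.numpy())
```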


From 39c85aff0e3f3c88a83587e25b93a310c397cbc4 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:30:11 +0200
Subject: [PATCH 08/23] ADD: keras layers for argmax, max, binary dilated mask

---
 otbtf/layers.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/otbtf/layers.py b/otbtf/layers.py
index 6da80bd7..38f50f9f 100644
--- a/otbtf/layers.py
+++ b/otbtf/layers.py
@@ -28,10 +28,10 @@ import tensorflow as tf
 
 
 Tensor = Any
-Scalars = List[float] | Tuple[Float]
+Scalars = List[float] | Tuple[float]
 
 
-class DilatedMask(keras.layers.Layer):
+class DilatedMask(tf.keras.layers.Layer):
     """Layer to dilate a binary mask."""
     def __init__(self, nodata_value: float, radius: int, name: str = None):
         """
@@ -70,7 +70,7 @@ class DilatedMask(keras.layers.Layer):
         return tf.cast(conv2d_out, tf.uint8)
 
 
-class ApplyMask(keras.layers.Layer):
+class ApplyMask(tf.keras.layers.Layer):
     """Layer to apply a binary mask to one input."""
     def __init__(self, out_nodata: float, name: str = None):
         """
@@ -95,7 +95,7 @@ class ApplyMask(keras.layers.Layer):
         return tf.where(mask == 1, float(self.out_nodata), inp)
 
 
-class ScalarsTile(keras.layers.Layer):
+class ScalarsTile(tf.keras.layers.Layer):
     """
     Layer to duplicate some scalars in a whole array.
     Simple example with only one scalar = 0.152:
@@ -127,7 +127,7 @@ class ScalarsTile(keras.layers.Layer):
         return tf.tile(inp, [1, tf.shape(ref)[1], tf.shape(ref)[2], 1])
 
 
-class Argmax(keras.layers.Layer):
+class Argmax(tf.keras.layers.Layer):
     """
     Layer to compute the argmax of a tensor.
 
@@ -160,7 +160,7 @@ class Argmax(keras.layers.Layer):
         return tf.expand_dims(tf.math.argmax(inputs, axis=-1), axis=-1)
 
 
-class Max(keras.layers.Layer):
+class Max(tf.keras.layers.Layer):
     """
     Layer to compute the max of a tensor.
 
-- 
GitLab
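
With the `tf.keras` namespace fixed, the new layers can be chained as below. This is a hedged sketch only: the single-band input, no-data value, dilation radius, output no-data value and the 1x1 convolution standing in for a real model head are all assumptions, not taken from the patch.

```python
# Usage sketch of the new layers: mask predictions around no-data pixels,
# and derive class / confidence maps from a softmax.
import tensorflow as tf
from otbtf.layers import DilatedMask, ApplyMask, Argmax, Max

inp = tf.keras.Input(shape=(None, None, 1), name="input_xs")
softmax = tf.keras.layers.Conv2D(3, 1, activation="softmax")(inp)

nodata_mask = DilatedMask(nodata_value=0.0, radius=8)(inp)   # 1 where no-data
masked = ApplyMask(out_nodata=255)([nodata_mask, softmax])   # no-data set to 255
labels = Argmax(name="labels")(softmax)                      # class index map
confidence = Max(name="confidence")(softmax)                 # max of the softmax
```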


From 0c8cc020ac2186c244187d1aa4c74a0e597e0130 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:30:44 +0200
Subject: [PATCH 09/23] ADD: keras layers for argmax, max, binary dilated mask

---
 otbtf/layers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/otbtf/layers.py b/otbtf/layers.py
index 38f50f9f..a3680421 100644
--- a/otbtf/layers.py
+++ b/otbtf/layers.py
@@ -19,7 +19,7 @@
 # ==========================================================================*/
 """
 [Source code :fontawesome-brands-github:](https://github.com/remicres/otbtf/
-tree/master/otbtf/ops.py){ .md-button }
+tree/master/otbtf/layers.py){ .md-button }
 
 This module provides some useful keras layers to build deep nets.
 """
-- 
GitLab


From 9ca8ad35480477b3e136315f9fd51bed6ca2465b Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Wed, 16 Aug 2023 10:43:32 +0200
Subject: [PATCH 10/23] ADD: otbtf.ops, otbtf.layers

---
 otbtf/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/otbtf/__init__.py b/otbtf/__init__.py
index 04ac11db..cfbcecb4 100644
--- a/otbtf/__init__.py
+++ b/otbtf/__init__.py
@@ -33,4 +33,5 @@ except ImportError:
 
 from otbtf.tfrecords import TFRecords  # noqa
 from otbtf.model import ModelBase  # noqa
+from otbtf import layers, ops  # noqa
 __version__ = pkg_resources.require("otbtf")[0].version
-- 
GitLab


From 0b8f9f9e2a3de9625236e955382b9285026bb4df Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Thu, 17 Aug 2023 11:04:45 +0200
Subject: [PATCH 11/23] DOC: change the input patch size in the example

---
 doc/api_tutorial.md | 75 ++++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 39 deletions(-)

diff --git a/doc/api_tutorial.md b/doc/api_tutorial.md
index 8c95190c..ef0e2c16 100644
--- a/doc/api_tutorial.md
+++ b/doc/api_tutorial.md
@@ -258,10 +258,7 @@ and the estimated values.
         out_tconv1 = _tconv(out_conv4, 64, "tconv1") + out_conv3
         out_tconv2 = _tconv(out_tconv1, 32, "tconv2") + out_conv2
         out_tconv3 = _tconv(out_tconv2, 16, "tconv3") + out_conv1
-        out_tconv4 = _tconv(out_tconv3, N_CLASSES, "classifier", None)
-
-        softmax_op = tf.keras.layers.Softmax(name=OUTPUT_SOFTMAX_NAME)
-        predictions = softmax_op(out_tconv4)
+        predictions = _tconv(out_tconv3, N_CLASSES, OUTPUT_SOFTMAX_NAME, "softmax")
 
         return {TARGET_NAME: predictions}
 
@@ -375,36 +372,36 @@ polluted by the convolutional padding.
 For a 2D convolution of stride \(s\) and kernel size \(k\), we can deduce the 
 valid output size \(y\) from input size \(x\) using this expression:
 $$
-y = \left[\frac{x - k + 1}{s}\right]
+y = \left[\frac{x - k }{s}\right] + 1
 $$
 For a 2D transposed convolution of stride \(s\) and kernel size \(k\), we can 
 deduce the valid output size \(y\) from input size \(x\) using this expression:
 $$
-y = (x * s) - k + 1
+y = x * s - k + 2
 $$
 
-Let's consider a chunk of input image of size 128, and check the valid output 
+Let's consider a chunk of input image of size 64, and check the valid output 
 size of our model:
 
 | Conv. name     | Conv. type        | Kernel | Stride | Out. size | Valid out. size |
 |----------------|-------------------|--------|--------|-----------|-----------------|
-| *input*        | /                 | /      | /      | 128       | 128             |
-| *conv1*        | Conv2D            | 3      | 2      | 64        | 63              |
-| *conv2*        | Conv2D            | 3      | 2      | 32        | 30              |
-| *conv3*        | Conv2D            | 3      | 2      | 16        | 14              |
-| *conv4*        | Conv2D            | 3      | 2      | 8         | 6               |
-| *tconv1*       | Transposed Conv2D | 3      | 2      | 16        | 10              |
-| *tconv2*       | Transposed Conv2D | 3      | 2      | 32        | 18              |
-| *tconv3*       | Transposed Conv2D | 3      | 2      | 64        | 34              |
-| *classifier*   | Transposed Conv2D | 3      | 2      | 128       | 66              |
+| *input*        | /                 | /      | /      | 64        | 64              |
+| *conv1*        | Conv2D            | 3      | 2      | 32        | 31              |
+| *conv2*        | Conv2D            | 3      | 2      | 16        | 15              |
+| *conv3*        | Conv2D            | 3      | 2      | 8         | 7               |
+| *conv4*        | Conv2D            | 3      | 2      | 4         | 3               |
+| *tconv1*       | Transposed Conv2D | 3      | 2      | 8         | 5               |
+| *tconv2*       | Transposed Conv2D | 3      | 2      | 16        | 9               |
+| *tconv3*       | Transposed Conv2D | 3      | 2      | 32        | 17              |
+| *classifier*   | Transposed Conv2D | 3      | 2      | 64        | 33              |
 
 This shows that our model can be applied in a fully convolutional fashion 
 without generating blocking artifacts, using the central part of the output of 
-size 66. This is equivalent to removing \((128 - 66)/2 = 31\) pixels from 
+size 33. This is equivalent to removing \((64 - 33)/2 = 15\) pixels from 
 the borders of the output. We round up to the nearest power of 2 to keep the 
-convolutions consistent between two adjacent image chunks, so we can remove 32 
-pixels from the borders. We can hence use the output cropped with **32** pixels, 
-named ***predictions_crop32*** in the model outputs.
+convolutions consistent between two adjacent image chunks, so we can remove 16 
+pixels from the borders. We can hence use the output cropped with **16** pixels, 
+named ***predictions_crop16*** in the model outputs.
 By default, cropped outputs in `otbtf.ModelBase` are generated for the following 
 values: `[16, 32, 64, 96, 128]`, but that can be changed by setting `inference_cropping` 
 in the model `__init__()` (see the reference API documentation for details).
@@ -434,10 +431,10 @@ In the following subsections, we run `TensorflowModelServe` over the input
 image, with the following parameters:
 
 - the input name is ***input_xs***
-- the output name is ***predictions_crop32*** (cropping margin of 32 pixels)
-- we choose a receptive field of ***128*** and an expression field of 
-***64*** so that they match the cropping margin of 32 pixels (since we remove 
-32 pixels from each side in x and y dimensions, we remove a total of 64 pixels 
+- the output name is ***predictions_crop16*** (cropping margin of 16 pixels)
+- we choose a receptive field of ***64*** and an expression field of 
+***32*** so that they match the cropping margin of 16 pixels (since we remove 
+16 pixels from each side in x and y dimensions, we remove a total of 32 pixels 
 in each of the x/y dimensions). 
 
 ### Command Line Interface
@@ -447,14 +444,14 @@ Open a terminal and run the following command:
 ```commandline
 otbcli_TensorflowModelServe \
 -source1.il $DATADIR/fake_spot6.jp2 \
--source1.rfieldx 128 \ 
--source1.rfieldy 128 \
+-source1.rfieldx 64 \ 
+-source1.rfieldy 64 \
 -source1.placeholder "input_xs" \
 -model.dir /tmp/my_1st_savedmodel \
 -model.fullyconv on \
--output.names "predictions_crop32" \
--output.efieldx 64 \
--output.efieldy 64 \
+-output.names "predictions_crop16" \
+-output.efieldx 32 \
+-output.efieldy 32 \
 -out softmax.tif
 ```
 
@@ -467,14 +464,14 @@ python wrapper:
 import otbApplication
 app = otbApplication.Registry.CreateApplication("TensorflowModelServe")
 app.SetParameterStringList("source1.il", ["fake_spot6.jp2"])
-app.SetParameterInt("source1.rfieldx", 128)
-app.SetParameterInt("source1.rfieldy", 128)
+app.SetParameterInt("source1.rfieldx", 64)
+app.SetParameterInt("source1.rfieldy", 64)
 app.SetParameterString("source1.placeholder", "input_xs")
 app.SetParameterString("model.dir", "/tmp/my_1st_savedmodel")
 app.EnableParameter("fullyconv")
-app.SetParameterStringList("output.names", ["predictions_crop32"])
-app.SetParameterInt("output.efieldx", 64)
-app.SetParameterInt("output.efieldy", 64)
+app.SetParameterStringList("output.names", ["predictions_crop16"])
+app.SetParameterInt("output.efieldx", 32)
+app.SetParameterInt("output.efieldy", 32)
 app.SetParameterString("out", "softmax.tif")
 app.ExecuteAndWriteOutput()
 ```
@@ -487,14 +484,14 @@ Using PyOTB is nicer:
 import pyotb
 pyotb.TensorflowModelServe({
     "source1.il": "fake_spot6.jp2",
-    "source1.rfieldx": 128,
-    "source1.rfieldy": 128,
+    "source1.rfieldx": 64,
+    "source1.rfieldy": 64,
     "source1.placeholder": "input_xs",
     "model.dir": "/tmp/my_1st_savedmodel",
     "fullyconv": True,
-    "output.names": ["predictions_crop32"],
-    "output.efieldx": 64,
-    "output.efieldy": 64,
+    "output.names": ["predictions_crop16"],
+    "output.efieldx": 32,
+    "output.efieldy": 32,
     "out": "softmax.tif",
 })
 ```
-- 
GitLab
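
The relation between the cropping margin and the receptive/expression fields chosen above can be stated in one line of plain arithmetic (this is not an OTBTF API, just a check of the values used in the snippets):

```python
# expression field = receptive field - 2 * cropping margin
crop = 16
receptive_field = 64
expression_field = receptive_field - 2 * crop
assert expression_field == 32  # matches output.efieldx / output.efieldy above
```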


From 46bb19e51c2b394a2b20ef4040d63a63ef98889e Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Tue, 22 Aug 2023 20:06:08 +0200
Subject: [PATCH 12/23] ENH: option to expand dims in otbtf.Argmax

---
 otbtf/layers.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/otbtf/layers.py b/otbtf/layers.py
index a3680421..8fc76deb 100644
--- a/otbtf/layers.py
+++ b/otbtf/layers.py
@@ -136,13 +136,15 @@ class Argmax(tf.keras.layers.Layer):
     Useful to transform a softmax into a "categorical" map for instance.
 
     """
-    def __init__(self, name: str = None):
+    def __init__(self, name: str = None, expand_last_dim: bool = True):
         """
         Params:
             name: layer name
+            expand_last_dim: expand the last dimension when True
 
         """
         super().__init__(name=name)
+        self.expand_last_dim = expand_last_dim
 
     def call(self, inputs):
         """
@@ -157,7 +159,10 @@ class Argmax(tf.keras.layers.Layer):
             (nb_classes - 1).
 
         """
-        return tf.expand_dims(tf.math.argmax(inputs, axis=-1), axis=-1)
+        argmax = tf.math.argmax(inputs, axis=-1)
+        if self.expand_last_dim:
+            return tf.expand_dims(argmax, axis=-1)
+        return argmax
 
 
 class Max(tf.keras.layers.Layer):
-- 
GitLab
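
A hedged sketch of the shape behaviour introduced by the new option; the tensor sizes below are illustrative assumptions.

```python
# With expand_last_dim=True (default) the class index keeps a trailing
# dimension of size 1; with False it is dropped.
import tensorflow as tf
from otbtf.layers import Argmax

softmax = tf.random.uniform((1, 4, 4, 3))             # [batch, x, y, nb_classes]
with_dim = Argmax()(softmax)                          # shape [1, 4, 4, 1]
without_dim = Argmax(expand_last_dim=False)(softmax)  # shape [1, 4, 4]
print(with_dim.shape, without_dim.shape)
```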


From 887fb17332dd485edb528c4a53d2d030e3ca7b31 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Tue, 22 Aug 2023 20:08:01 +0200
Subject: [PATCH 13/23] STY: linting

---
 otbtf/layers.py | 2 +-
 otbtf/ops.py    | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/otbtf/layers.py b/otbtf/layers.py
index 8fc76deb..ef65ec1c 100644
--- a/otbtf/layers.py
+++ b/otbtf/layers.py
@@ -55,7 +55,7 @@ class DilatedMask(tf.keras.layers.Layer):
         nodata_mask = tf.cast(tf.math.equal(inp, self.nodata_value), tf.uint8)
 
         se_size = 1 + 2 * self.radius
-        # Create a morphological kernel suitable for binary dilatation, see 
+        # Create a morphological kernel suitable for binary dilatation, see
         # https://stackoverflow.com/q/54686895/13711499
         kernel = tf.zeros((se_size, se_size, 1), dtype=tf.uint8)
         conv2d_out = tf.nn.dilation2d(
diff --git a/otbtf/ops.py b/otbtf/ops.py
index 4a8d0b96..ef5c52b9 100644
--- a/otbtf/ops.py
+++ b/otbtf/ops.py
@@ -30,6 +30,8 @@ import tensorflow as tf
 
 Tensor = Any
 Scalars = List[float] | Tuple[float]
+
+
 def one_hot(labels: Tensor, nb_classes: int):
     """
     Converts labels values into one-hot vector.
@@ -43,4 +45,4 @@ def one_hot(labels: Tensor, nb_classes: int):
 
     """
     labels_xy = tf.squeeze(tf.cast(labels, tf.int32), axis=-1)  # shape [x, y]
-    return tf.one_hot(labels_xy, depth=nb_classes)  # shape [x, y, nb_classes]
\ No newline at end of file
+    return tf.one_hot(labels_xy, depth=nb_classes)  # shape [x, y, nb_classes]
-- 
GitLab


From 04010c5eb837b780ae8e4e3ed5025475b3c08de4 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Sat, 26 Aug 2023 19:56:29 +0200
Subject: [PATCH 14/23] CI: test decloud

---
 .gitlab-ci.yml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b1dff867..e2f836b2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -164,7 +164,7 @@ ctest:
   extends: .tests_base
   stage: Applications Test
   before_script:
-    - pip3 install pytest pytest-cov pytest-order
+    - pip install pytest pytest-cov pytest-order
     - mkdir -p $ARTIFACT_TEST_DIR
     - cd $CI_PROJECT_DIR
 
@@ -189,6 +189,15 @@ sr4rs:
     - export PYTHONPATH=$PYTHONPATH:$PWD/sr4rs
     - python -m pytest --junitxml=$ARTIFACT_TEST_DIR/report_sr4rs.xml $OTBTF_SRC/test/sr4rs_unittest.py
 
+decloud:
+  extends: .applications_test_base
+  script:
+    - git clone https://github.com/CNES/decloud.git
+    - pip install -r $PWD/decloud/docker/requirements.txt
+    - wget -P decloud_data --no-verbose --recursive --level=inf --no-parent -R "index.html*" --cut-dirs=3 --no-host-directories http://indexof.montpellier.irstea.priv/projets/geocicd/decloud/
+    - export DECLOUD_DATA_DIR="$PWD/decloud_data"
+    - pytest decloud/tests/train_from_tfrecords_unittest.py
+
 otbtf_api:
   extends: .applications_test_base
   script:
-- 
GitLab


From 541a27d2ec30f24afa512a496b12647d846bb388 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Sat, 26 Aug 2023 20:16:35 +0200
Subject: [PATCH 15/23] CI: test decloud

---
 .gitlab-ci.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e2f836b2..fd288b31 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -197,6 +197,7 @@ decloud:
     - wget -P decloud_data --no-verbose --recursive --level=inf --no-parent -R "index.html*" --cut-dirs=3 --no-host-directories http://indexof.montpellier.irstea.priv/projets/geocicd/decloud/
     - export DECLOUD_DATA_DIR="$PWD/decloud_data"
     - pytest decloud/tests/train_from_tfrecords_unittest.py
+    - pytest decloud/tests/create_tfrecords_unittest.py
 
 otbtf_api:
   extends: .applications_test_base
-- 
GitLab


From ac136d1a93784a385143daa39af89f41ae1228e1 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Sat, 26 Aug 2023 20:16:56 +0200
Subject: [PATCH 16/23] DOC: explain how to append additional outputs

---
 doc/api_tutorial.md                           | 13 +++++++++
 .../tensorflow_v2x/fcnn/fcnn_model.py         | 27 +++++++++++++++++--
 2 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/doc/api_tutorial.md b/doc/api_tutorial.md
index ef0e2c16..cc08b919 100644
--- a/doc/api_tutorial.md
+++ b/doc/api_tutorial.md
@@ -184,6 +184,19 @@ def dataset_preprocessing_fn(examples: dict):
 
 As you can see, we don't modify the input tensor, since we want to use it 
 as is in the model.
+Note that since version 4.2.0, `otbtf.ops.one_hot` can ease this transform:
+
+```python
+def dataset_preprocessing_fn(examples: dict):
+    return {
+        INPUT_NAME: examples["input_xs_patches"],
+        TARGET_NAME: otbtf.ops.one_hot(
+            labels=examples["labels_patches"],
+            nb_classes=N_CLASSES
+        )
+}
+
+```
 
 ### Model inputs preprocessing
 
diff --git a/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py b/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
index 44285d92..fcd14a20 100644
--- a/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
+++ b/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
@@ -123,6 +123,18 @@ class FCNNModel(ModelBase):
         softmax_op = tf.keras.layers.Softmax(name=OUTPUT_SOFTMAX_NAME)
         predictions = softmax_op(out_tconv4)
 
+        # note that we could also add additional outputs, for instance the
+        # argmax of the softmax:
+        #
+        # argmax_op = otbtf.layers.Argmax(name="labels")
+        # labels = argmax_op(predictions)
+        # return {TARGET_NAME: predictions, OUTPUT_ARGMAX_NAME: labels}
+        # The default extra outputs (i.e. output tensors with cropping in
+        # physical domain) are appended by `otbtf.ModelBase` for all returned
+        # outputs of this function to be used at inference time (e.g.
+        # "labels_crop32", "labels_crop64", ...,
+        # "predictions_softmax_tensor_crop16", ..., etc).
+
         return {TARGET_NAME: predictions}
 
 
@@ -173,12 +185,23 @@ def train(params, ds_train, ds_valid, ds_test):
         model = FCNNModel(dataset_element_spec=ds_train.element_spec)
 
         # Compile the model
+        # It is a good practice to use a `dict` to explicitly name the outputs
+        # over which the losses/metrics are computed.
+        # This ensures better control of the optimization, and also avoids lots of
+        # useless outputs (e.g. metrics computed over extra outputs).
         model.compile(
-            loss=tf.keras.losses.CategoricalCrossentropy(),
+            loss={
+                TARGET_NAME: tf.keras.losses.CategoricalCrossentropy()
+            },
             optimizer=tf.keras.optimizers.Adam(
                 learning_rate=params.learning_rate
             ),
-            metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()]
+            metrics={
+                TARGET_NAME: [
+                    tf.keras.metrics.Precision(class_id=1),
+                    tf.keras.metrics.Recall(class_id=1)
+                ]
+            }
         )
 
         # Summarize the model (in CLI)
-- 
GitLab
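
The point about dict-keyed losses and metrics can be seen on a tiny standalone `tf.keras` model; the names and shapes below are illustrative assumptions, not taken from the OTBTF example. Only the output named in the dicts is optimized and monitored, while the extra output is simply carried along.

```python
import tensorflow as tf

inp = tf.keras.Input(shape=(8,))
probs = tf.keras.layers.Dense(3, activation="softmax")(inp)
labels = tf.keras.layers.Lambda(lambda t: tf.argmax(t, axis=-1))(probs)
model = tf.keras.Model(inputs=inp, outputs={"predictions": probs, "labels": labels})

model.compile(
    # Loss/metrics are attached to the "predictions" output only;
    # "labels" gets neither a loss nor a metric.
    loss={"predictions": tf.keras.losses.CategoricalCrossentropy()},
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    metrics={
        "predictions": [
            tf.keras.metrics.Precision(class_id=1),
            tf.keras.metrics.Recall(class_id=1),
        ]
    },
)
```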


From 87a734f012730920d96117dff4f21546f5e5c3bf Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Sat, 26 Aug 2023 20:45:27 +0200
Subject: [PATCH 17/23] CI: test decloud

---
 .gitlab-ci.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index fd288b31..06d8ba1d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -196,8 +196,9 @@ decloud:
     - pip install -r $PWD/decloud/docker/requirements.txt
     - wget -P decloud_data --no-verbose --recursive --level=inf --no-parent -R "index.html*" --cut-dirs=3 --no-host-directories http://indexof.montpellier.irstea.priv/projets/geocicd/decloud/
     - export DECLOUD_DATA_DIR="$PWD/decloud_data"
-    - pytest decloud/tests/train_from_tfrecords_unittest.py
-    - pytest decloud/tests/create_tfrecords_unittest.py
+    - cd decloud
+    - pytest tests/train_from_tfrecords_unittest.py
+    - pytest tests/create_tfrecords_unittest.py
 
 otbtf_api:
   extends: .applications_test_base
-- 
GitLab


From 24dc7c01d617fac393518cb9f66dec0ca0208f0c Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Sat, 26 Aug 2023 21:14:23 +0200
Subject: [PATCH 18/23] CI: test decloud

---
 .gitlab-ci.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 06d8ba1d..e2f836b2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -196,9 +196,7 @@ decloud:
     - pip install -r $PWD/decloud/docker/requirements.txt
     - wget -P decloud_data --no-verbose --recursive --level=inf --no-parent -R "index.html*" --cut-dirs=3 --no-host-directories http://indexof.montpellier.irstea.priv/projets/geocicd/decloud/
     - export DECLOUD_DATA_DIR="$PWD/decloud_data"
-    - cd decloud
-    - pytest tests/train_from_tfrecords_unittest.py
-    - pytest tests/create_tfrecords_unittest.py
+    - pytest decloud/tests/train_from_tfrecords_unittest.py
 
 otbtf_api:
   extends: .applications_test_base
-- 
GitLab


From 00165735bccecf119f2d7a45c43c4d06aa507d68 Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Tue, 12 Sep 2023 21:23:09 +0200
Subject: [PATCH 19/23] COMP: update module version

---
 setup.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/setup.py b/setup.py
index 1feeff9c..3222afc4 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setuptools.setup(
     name="otbtf",
-    version="4.1.0",
+    version="4.2.0",
     author="Remi Cresson",
     author_email="remi.cresson@inrae.fr",
     description="OTBTF: Orfeo ToolBox meets TensorFlow",
@@ -14,11 +14,9 @@ setuptools.setup(
     long_description_content_type="text/markdown",
     url="https://gitlab.irstea.fr/remi.cresson/otbtf",
     classifiers=[
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
         "Topic :: Scientific/Engineering :: GIS",
         "Topic :: Scientific/Engineering :: Image Processing",
         "License :: OSI Approved :: Apache Software License",
-- 
GitLab


From bd6eb0837a5abe103b0d9f8f68f3796c265581b9 Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Tue, 12 Sep 2023 21:31:28 +0200
Subject: [PATCH 20/23] DOC: update release notes

---
 RELEASE_NOTES.txt | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index 9aa374e6..97a790cd 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,3 +1,11 @@
+Version 4.2.0 (12 sep 2023)
+----------------------------------------------------------------
+* Add new python modules: `otbtf.layers` (with new classes `DilatedMask`, `ApplyMask`, `ScalarsTile`, `Argmax`, `Max`) and `otbtf.ops` (`one_hot()`)
+* Fix an error in the documentation
+* Update the otbtf-keras tutorial
+* Add decloud testing in CI
+* Fix protobuf version in dockerfile (see https://github.com/tensorflow/tensorflow/issues/61551)
+
 Version 4.1.0 (23 may 2023)
 ----------------------------------------------------------------
 * Add no-data values support for inference in TensorflowModelServe application
-- 
GitLab


From f022c94e171f8291ca9a38c90821b87c0585e008 Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Tue, 12 Sep 2023 21:33:24 +0200
Subject: [PATCH 21/23] DOC: update readme

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ec51714c..585e8022 100644
--- a/README.md
+++ b/README.md
@@ -33,8 +33,8 @@ The documentation is available on [otbtf.readthedocs.io](https://otbtf.readthedo
 You can use our latest GPU enabled docker images.
 
 ```bash
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu otbcli_PatchesExtraction
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu python -c "import otbtf"
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.2.0-gpu otbcli_PatchesExtraction
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.2.0-gpu python -c "import otbtf"
 ```
 
 You can also build OTBTF from sources (see the documentation)
-- 
GitLab


From 888712351da377ebe7375895b2b269095966ff16 Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Tue, 12 Sep 2023 21:39:00 +0200
Subject: [PATCH 22/23] DOC: docker snippets

---
 .gitlab-ci.yml    |  2 +-
 doc/docker_use.md | 28 +++++++++++++++++-----------
 otbtf/__init__.py |  2 +-
 3 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e2f836b2..358a167c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,5 @@
 variables:
-  OTBTF_VERSION: 4.1.0
+  OTBTF_VERSION: 4.2.0
   OTB_BUILD: /src/otb/build/OTB/build  # Local OTB build directory
   OTBTF_SRC: /src/otbtf  # Local OTBTF source directory
   OTB_TEST_DIR: $OTB_BUILD/Testing/Temporary  # OTB testing directory
diff --git a/doc/docker_use.md b/doc/docker_use.md
index 0c343fef..f7b81683 100644
--- a/doc/docker_use.md
+++ b/doc/docker_use.md
@@ -5,13 +5,13 @@ We recommend to use OTBTF from official docker images.
 Latest CPU-only docker image:
 
 ```commandline
-docker pull mdl4eo/otbtf:4.1.0-cpu
+docker pull mdl4eo/otbtf:4.2.0-cpu
 ```
 
 Latest GPU-ready docker image:
 
 ```commandline
-docker pull mdl4eo/otbtf:4.1.0-gpu
+docker pull mdl4eo/otbtf:4.2.0-gpu
 ```
 
 Read more in the following sections.
@@ -25,12 +25,12 @@ Since OTBTF >= 3.2.1 you can find the latest docker images on
 
 | Name                                                                               | Os            | TF    | OTB   | Description            | Dev files | Compute capability |
 |------------------------------------------------------------------------------------| ------------- |-------|-------| ---------------------- | --------- | ------------------ |
-| **mdl4eo/otbtf:4.1.0-cpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.1.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.1.0-gpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
-| **mdl4eo/otbtf:4.1.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
-| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt**     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
-| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.2.0-cpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.2.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.2.0-gpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.2.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.2.0-gpu-opt**     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.2.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
 
 The list of older releases is available [here](#older-images).
 
@@ -51,13 +51,13 @@ You can then use the OTBTF `gpu` tagged docker images with the **NVIDIA runtime*
 With Docker version earlier than 19.03 :
 
 ```bash
-docker run --runtime=nvidia -ti mdl4eo/otbtf:4.1.0-gpu bash
+docker run --runtime=nvidia -ti mdl4eo/otbtf:4.2.0-gpu bash
 ```
 
 With Docker version including and after 19.03 :
 
 ```bash
-docker run --gpus all -ti mdl4eo/otbtf:4.1.0-gpu bash
+docker run --gpus all -ti mdl4eo/otbtf:4.2.0-gpu bash
 ```
 
 You can find some details on the **GPU docker image** and some **docker tips 
@@ -80,7 +80,7 @@ See here how to install docker on Ubuntu
 1. Install [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install-win10#manual-installation-steps) (Windows Subsystem for Linux)
 2. Install [docker desktop](https://www.docker.com/products/docker-desktop)
 3. Start **docker desktop** and **enable WSL2** from *Settings* > *General* then tick the box *Use the WSL2 based engine*
-3. Open a **cmd.exe** or **PowerShell** terminal, and type `docker create --name otbtf-cpu --interactive --tty mdl4eo/otbtf:4.1.0-cpu`
+3. Open a **cmd.exe** or **PowerShell** terminal, and type `docker create --name otbtf-cpu --interactive --tty mdl4eo/otbtf:4.2.0-cpu`
 4. Open **docker desktop**, and check that the docker is running in the **Container/Apps** menu
 ![Docker desktop, after the docker image is downloaded and ready to use](images/docker_desktop_1.jpeg)
 5. From **docker desktop**, click on the icon highlighted as shown below, and use the bash terminal that should pop up!
@@ -166,4 +166,10 @@ Here you can find the list of older releases of OTBTF:
 | **mdl4eo/otbtf:4.0.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
 | **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt**     | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
 | **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.0.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12  | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-cpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-cpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-gpu**                                                         | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf:4.1.0-gpu-dev**                                                     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU, no optimization (dev) | yes   | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt**     | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt.          | no        | 5.2,6.1,7.0,7.5,8.6|
+| **gitlab.irstea.fr/remi.cresson/otbtf/container_registry/otbtf:4.1.0-gpu-opt-dev** | Ubuntu Jammy  | r2.12 | 8.1.0 | GPU with opt. (dev)    | yes       | 5.2,6.1,7.0,7.5,8.6|
 
diff --git a/otbtf/__init__.py b/otbtf/__init__.py
index cfbcecb4..1ce62422 100644
--- a/otbtf/__init__.py
+++ b/otbtf/__init__.py
@@ -2,7 +2,7 @@
 # ==========================================================================
 #
 #   Copyright 2018-2019 IRSTEA
-#   Copyright 2020-2022 INRAE
+#   Copyright 2020-2023 INRAE
 #
 #   Licensed under the Apache License, Version 2.0 (the "License");
 #   you may not use this file except in compliance with the License.
-- 
GitLab


From cb9b8b175d89126457fd6e992f34efa228a22bea Mon Sep 17 00:00:00 2001
From: Cresson Remi <remi.cresson@irstea.fr>
Date: Tue, 12 Sep 2023 21:41:37 +0200
Subject: [PATCH 23/23] Update doc/docker_troubleshooting.md

---
 doc/docker_troubleshooting.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/doc/docker_troubleshooting.md b/doc/docker_troubleshooting.md
index c34b7d2d..4aa0d506 100644
--- a/doc/docker_troubleshooting.md
+++ b/doc/docker_troubleshooting.md
@@ -52,13 +52,13 @@ sudo service docker {status,enable,disable,start,stop,restart}
 Run a simple command in a one-shot container:
 
 ```bash
-docker run mdl4eo/otbtf:3.4.0-cpu otbcli_PatchesExtraction
+docker run mdl4eo/otbtf:4.2.0-cpu otbcli_PatchesExtraction
 ```
 
 You can also use the image in interactive mode with bash:
 
 ```bash
-docker run -ti mdl4eo/otbtf:3.4.0-cpu bash
+docker run -ti mdl4eo/otbtf:4.2.0-cpu bash
 ```
 
 ### Mounting file systems
@@ -70,7 +70,7 @@ to use inside the container:
 The following command shows you how to access the folder from the docker image.
 
 ```bash
-docker run -v /mnt/disk1/:/data/ -ti mdl4eo/otbtf:3.4.0-cpu bash -c "ls /data"
+docker run -v /mnt/disk1/:/data/ -ti mdl4eo/otbtf:4.2.0-cpu bash -c "ls /data"
 ```
 Beware of ownership issues! see the last section of this doc.
 
@@ -81,7 +81,7 @@ any directory.
 
 ```bash
 docker create --interactive --tty --volume /home/$USER:/home/otbuser/ \
-    --name otbtf mdl4eo/otbtf:3.4.0-cpu /bin/bash
+    --name otbtf mdl4eo/otbtf:4.2.0-cpu /bin/bash
 ```
 
 !!! warning
@@ -160,7 +160,7 @@ automatically pull image
 
 ```bash
 docker create --interactive --tty --volume /home/$USER:/home/otbuser \
-    --name otbtf mdl4eo/otbtf:3.4.0-cpu /bin/bash
+    --name otbtf mdl4eo/otbtf:4.2.0-cpu /bin/bash
 ```
 
 Start a background container process:
-- 
GitLab