diff --git a/converter/main.py b/converter/main.py
index e3b11f9a3a5461a2a815a5447f355508752ced13..40ecfdae38e86c797b2fea371f870e465b998282 100644
--- a/converter/main.py
+++ b/converter/main.py
@@ -127,6 +127,7 @@ class OPTYPE(IntEnum):
     Resize = (24,)
     Compare = (25,)
     Where = (26,)
+    Minimum = (27,)
 
     # "BatchMatMulV2" did not exist in Tensorflow 1.9. It exists in
     # Tensorflow 1.15.
@@ -681,8 +682,8 @@ def parse_graph_node(
         myGraph[node.output[0]]["additional"] = {}
         myGraph[node.output[0]]["additional"]["data"] = node
         map_onnx_to_myGraph[node.output[0]] = node.output[0]
 
+    elif node.op_type == "Identity" or node.op_type == "Cast":
         myGraph[node.output[0]] = {}
         myGraph[node.output[0]]["op_type"] = OPTYPE.Identity
         myGraph[node.output[0]]["inputs"] = [map_onnx_to_myGraph[n0name]]
@@ -831,6 +832,17 @@ def parse_graph_node(
         myGraph[node.output[0]]["additional"]["data"] = node
         map_onnx_to_myGraph[node.output[0]] = node.output[0]
 
+    elif node.op_type == "Min":
+        myGraph[node.output[0]] = {}
+        myGraph[node.output[0]]["op_type"] = OPTYPE.Minimum
+        myGraph[node.output[0]]["inputs"] = [
+            map_onnx_to_myGraph[n0name],
+            map_onnx_to_myGraph[node.input[1]],
+        ]
+        myGraph[node.output[0]]["additional"] = {}
+        myGraph[node.output[0]]["additional"]["data"] = node
+        map_onnx_to_myGraph[node.output[0]] = node.output[0]
+
     elif node.op_type == "Unsqueeze":
         # No need to parse Unsqueeze as SADL can handle it.
         map_onnx_to_myGraph[node.output[0]] = node.output[0]
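
The two converter changes above are related: PyTorch exports clamping with tensor bounds as a `Cast` → `Max` → `Min` chain (the new `utests/models/clip.onnx` below is a pytorch1.11.0 `torch-jit-export` whose graph contains the nodes `Cast_0`, `Max_1`, `Cast_2` and `Min_3`), so `Cast` is folded into `Identity` and `Min` maps to the new `Minimum` op. A minimal sketch of how such a model could be generated; the actual export script is not in this patch, so the `Clip` module, shapes and integer bounds here are assumptions:

```python
# Hypothetical reconstruction of a clip.onnx-style model: integer tensor
# bounds force the exporter to emit Cast nodes before Max and Min.
import torch

class Clip(torch.nn.Module):
    def forward(self, x, lo, hi):
        # .to(x.dtype) on the integer bounds becomes an ONNX Cast node
        return torch.min(torch.max(x, lo.to(x.dtype)), hi.to(x.dtype))

x = torch.randn(1, 8, 8, 4)
lo, hi = torch.tensor(-1), torch.tensor(1)
torch.onnx.export(Clip(), (x, lo, hi), "clip.onnx", opset_version=11)
```
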
diff --git a/sadl/layer.h b/sadl/layer.h
index 3b6737d1801f7cc47a14f35027e6d56594f0f526..1a8669f19608949f4f77a980bb7f2a77d4bdccb9 100644
--- a/sadl/layer.h
+++ b/sadl/layer.h
@@ -74,7 +74,8 @@ struct OperationType
     Resize             = 24,
     Compare            = 25,
     Where              = 26,
-    OperationTypeCount = 27
+    Minimum            = 27,
+    OperationTypeCount = 28
   };
 };
 
diff --git a/sadl/layer_minimum.h b/sadl/layer_minimum.h
new file mode 100644
index 0000000000000000000000000000000000000000..12b0401ff8e6a8627da38502e4e79b145d07c5c2
--- /dev/null
+++ b/sadl/layer_minimum.h
@@ -0,0 +1,180 @@
+/* The copyright in this software is being made available under the BSD
+ * License, included below. This software may be subject to other third party
+ * and contributor rights, including patent rights, and no such rights are
+ * granted under this license.
+ *
+ * Copyright (c) 2010-2024, ITU/ISO/IEC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+ *    be used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+#include "layer.h"
+
+namespace sadl
+{
+namespace layers
+{
+template<typename T> class Minimum : public Layer<T>
+{
+public:
+  using Layer<T>::Layer;
+  using Layer<T>::m_out;   // to avoid this->
+  using Layer<T>::m_initDone;
+
+  virtual bool apply(std::vector<Tensor<T> *> &in) override;
+  virtual bool init(const std::vector<Tensor<T> *> &in) override;
+  virtual bool mutateInput() const override { return true; }
+
+protected:
+  virtual bool loadInternal(std::istream &file, Version) override;
+};
+
+template<typename T> bool Minimum<T>::apply(std::vector<Tensor<T> *> &in)
+{
+  assert(in.size() == 2);
+  if (in[0] == in[1])
+  {
+    std::cerr << "  input aliasing" << std::endl;
+    return false;
+  }
+  const int shift = in[0]->quantizer - in[1]->quantizer;   // align in[1] to in[0]'s fixed-point scale
+  swap(*in[0], m_out);
+
+  /*
+  Per the checks in init() below, if the condition below is
+  false, in[1] is necessarily a singleton or a vector broadcast
+  along the last dimension of in[0].
+  */
+  if (in[0]->dims() == in[1]->dims())
+  {
+    for (auto it0 = m_out.begin(), it1 = in[1]->begin(); it0 != m_out.end(); ++it0, ++it1)
+    {
+      T z = *it1;
+      ComputationType<T>::shift_left(z, shift);
+      *it0 = std::min(*it0, z);
+    }
+  }
+  else
+  {
+    const Tensor<T> &B{ *in[1] };
+    if (B.size() == 1)
+    {
+      T value{ B[0] };
+      ComputationType<T>::shift_left(value, shift);
+      for (auto it0 = m_out.begin(); it0 != m_out.end(); ++it0)
+      {
+        *it0 = std::min(*it0, value);
+      }
+    }
+    else if (in[0]->dims().size() == 2)
+    {
+      const int N{ in[0]->dims()[0] };
+      const int H{ in[0]->dims()[1] };
+      for (int n = 0; n < N; ++n)
+        for (int i = 0; i < H; ++i)
+        {
+          T z = B[i];
+          ComputationType<T>::shift_left(z, shift);
+          m_out(n, i) = std::min(m_out(n, i), z);
+        }
+    }
+    else if (in[0]->dims().size() == 3)
+    {
+      const int N{ in[0]->dims()[0] };
+      const int H{ in[0]->dims()[1] };
+      const int W{ in[0]->dims()[2] };
+      for (int n = 0; n < N; ++n)
+        for (int i = 0; i < H; ++i)
+          for (int j = 0; j < W; ++j)
+          {
+            T z = B[j];
+            ComputationType<T>::shift_left(z, shift);
+            m_out(n, i, j) = std::min(m_out(n, i, j), z);
+          }
+    }
+    else if (in[0]->dims().size() == 4)
+    {
+      const int N{ in[0]->dims()[0] };
+      const int H{ in[0]->dims()[1] };
+      const int W{ in[0]->dims()[2] };
+      const int K{ in[0]->dims()[3] };
+      for (int n = 0; n < N; ++n)
+        for (int i = 0; i < H; ++i)
+          for (int j = 0; j < W; ++j)
+            for (int k = 0; k < K; ++k)
+            {
+              T z = B[k];
+              ComputationType<T>::shift_left(z, shift);
+              m_out(n, i, j, k) = std::min(m_out(n, i, j, k), z);
+            }
+    }
+  }
+  return true;
+}
+
+template<typename T> bool Minimum<T>::init(const std::vector<Tensor<T> *> &in)
+{
+  // check arity before dereferencing in[0]/in[1] below
+  if (in.size() != 2)
+  {
+    return false;
+  }
+  SADL_DBG(std::cout << "  - " << in[0]->dims() << ' ' << in[1]->dims() << std::endl);
+
+  /*
+  Broadcasting is supported: either both inputs have the same
+  shape, or the second input is a singleton, or the second input
+  is a vector whose size equals the last dimension of the first
+  input.
+  */
+  if (in[1]->size() == 1)
+  {   // singleton
+      // ok
+  }
+  else if (in[1]->dims().size() == 1 || (in[1]->dims().size() == 2 && in[1]->dims()[0] == 1))
+  {
+    if (in[1]->size() != in[0]->dims().back())
+    {   // broadcast along the last dim
+      return false;
+    }
+  }
+  else
+  {
+    if (!(in[0]->dims() == in[1]->dims()))
+    {   // same dims
+      return false;
+    }
+  }
+  m_out.resize(in[0]->dims());
+  m_initDone = true;
+  return true;
+}
+
+template<typename T> bool Minimum<T>::loadInternal(std::istream &, Version) { return true; }
+
+}   // namespace layers
+}   // namespace sadl
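
For reference, a numpy sketch of the semantics `apply()` implements: `in[1]` is shifted to `in[0]`'s fixed-point scale, then broadcast over the last dimension. This is my reading of the code above, with `q0 >= q1` assumed so the left shift is non-negative (`ComputationType<T>::shift_left` also handles the negative case by shifting right):

```python
# Reference semantics of Minimum<T>::apply() on int32 data.
import numpy as np

q0, q1 = 8, 6                               # quantizers of in[0] and in[1]
a = np.array([[300, -512, 77]], np.int32)   # shape (1, 3), scale 2^-8
b = np.array([64, 64, 64], np.int32)        # shape (3,),  scale 2^-6
shift = q0 - q1                             # the shift computed in apply()
out = np.minimum(a, b << shift)             # b broadcast over the last dim
print(out / 2.0 ** q0)                      # [[1.0, -2.0, 0.30078125]]
```
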
diff --git a/sadl/layers.h b/sadl/layers.h
index 1009eab9ab5ac9af876353566afd4328b37f3ab0..eadc2ec3406dd96daf26ed3716c21e8ebe3c1508 100644
--- a/sadl/layers.h
+++ b/sadl/layers.h
@@ -59,6 +59,7 @@
 #include "layer_resize.h"
 #include "layer_compare.h"
 #include "layer_where.h"
+#include "layer_minimum.h"
 
 namespace sadl
 {
@@ -99,6 +100,7 @@ inline std::string opName(const OperationType::Type op)
     DIRTYCASEPRINT(GridSample);
     DIRTYCASEPRINT(Resize);
     DIRTYCASEPRINT(Compare);
+    DIRTYCASEPRINT(Minimum);
   default:
     oss << "??";
     break;
diff --git a/sadl/model.h b/sadl/model.h
index ef345afb4f620a7bd9a5bb1e6b056ec04c2251dc..074fd4c554354d3b7c05c8b887fc5eec901cbc57 100644
--- a/sadl/model.h
+++ b/sadl/model.h
@@ -185,6 +185,9 @@ template<typename T> std::unique_ptr<layers::Layer<T>> createLayer(int32_t id, l
   case layers::OperationType::Where:
     return std::unique_ptr<layers::Layer<T>>(new layers::Where<T>{ id, op });
     break;
+  case layers::OperationType::Minimum:
+    return std::unique_ptr<layers::Layer<T>>(new layers::Minimum<T>{ id, op });
+    break;
   case layers::OperationType::OperationTypeCount:
     break;   // no default on purpose
   }
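
The integer codes are the serialized model format, so the converter's `OPTYPE`, `OperationType` in `sadl/layer.h` and this factory must all agree that `Minimum` is 27. A hypothetical consistency check, not part of the patch (the import path and the mirrored dict are assumptions):

```python
# Keep the Python and C++ op codes in sync; the dict mirrors sadl/layer.h.
from converter.main import OPTYPE  # assumed import path

CPP_OPERATION_TYPE = {"Where": 26, "Minimum": 27}
for name, code in CPP_OPERATION_TYPE.items():
    assert int(OPTYPE[name]) == code, f"{name} out of sync with sadl/layer.h"
```
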
diff --git a/sample/copy.h b/sample/copy.h
index 951e3a53011bd3c3c674352bef7901bf5dda3b16..ec0870e137186d8573bbac1b020393b998a32392 100644
--- a/sample/copy.h
+++ b/sample/copy.h
@@ -99,6 +99,8 @@ template<typename T> bool copy(const sadl::layers::Layer<float> &layer, sadl::la
     break;
   case sadl::layers::OperationType::Where:
     break;
+  case sadl::layers::OperationType::Minimum:
+    break;
     // no default to get warning
   }
 
diff --git a/sample/naive_quantization.cpp b/sample/naive_quantization.cpp
index f24658b069f2748b3aba79f3638920181ed58147..d3954851d74aeccac3ce3210722e9586490711f2 100644
--- a/sample/naive_quantization.cpp
+++ b/sample/naive_quantization.cpp
@@ -77,7 +77,8 @@ bool toQuantize(sadl::layers::OperationType::Type type)
          && type != sadl::layers::OperationType::Relu && type != sadl::layers::OperationType::Reshape && type != sadl::layers::OperationType::Shape
          && type != sadl::layers::OperationType::Slice && type != sadl::layers::OperationType::Transpose && type != sadl::layers::OperationType::PReLU
          && type != sadl::layers::OperationType::ScatterND && type != sadl::layers::OperationType::GridSample && type != sadl::layers::OperationType::Resize
-         && type != sadl::layers::OperationType::Compare && type != sadl::layers::OperationType::Where;
+         && type != sadl::layers::OperationType::Compare && type != sadl::layers::OperationType::Where && type != sadl::layers::OperationType::Maximum
+         && type != sadl::layers::OperationType::Minimum;
 }
 
 template<typename T> void quantizeTensor(const sadl::Tensor<float> &B, sadl::Tensor<T> &Bq)
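
Skipping `Minimum` (and `Maximum`) in `toQuantize()` is consistent with the layer having no weights: min is order-preserving, so it commutes with any positive uniform scale, and the only fixed-point work needed is the quantizer re-alignment already done in `apply()`. That rationale is inferred from the code rather than stated in the patch; a quick numpy check of the property:

```python
# min commutes with a positive scale factor, so nothing here needs its
# own quantizer: min(a, b) * s == min(a * s, b * s) for s > 0.
import numpy as np

a = np.array([3.5, -2.0, 0.25])
b = np.array([1.25, 0.5, 0.5])
s = 2 ** 6   # a positive fixed-point scale
assert np.array_equal(np.minimum(a, b) * s, np.minimum(a * s, b * s))
```
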
diff --git a/utests/check.sh b/utests/check.sh
index 8bd005d57de0a2fe61ba1cce1c624d9a7ed3f51d..9dbdbfe1ac245e46cd2dc16380675358c25f9479 100755
--- a/utests/check.sh
+++ b/utests/check.sh
@@ -18,7 +18,7 @@ for F in $L; do
   ../utest.sh ../models/${F}.onnx --no_transpose;
 done
 
-L="conv2d_4_8x8x4_k1x1s1,1_g1_p0,0 conv2d_4_8x8x4_k1x1s1,1_g4_p0,0 conv2d_4_8x8x4_k1x1s2,1_g1_p0,0 conv2d_4_8x8x4_k1x3s1,2_g1_p0,1 conv2d_4_8x8x4_k3x1s1,1_g1_p1,0 conv2d_4_8x8x4_k3x1s1,1_g4_p1,0 conv2d_4_8x8x4_k3x3s1,1_g1_p1,1 conv2d_4_8x8x4_k3x3s2,1_g1_p1,1 conv2d_4_8x8x4_k3x3s2,2_g1_p1,1 conv2d_4_8x8x4_k5x5s1,1_g1_p2,2 conv2d_4_8x8x4_k5x5s1,1_g4_p2,2 conv2d_4_8x8x4_k5x5s2,1_g1_p2,2 conv2d_4_8x9x4_k1x1s2,1_g1_p0,0 conv2d_4_8x9x4_k3x1s1,1_g4_p1,0 conv2d_4_8x9x4_k3x3s1,1_g4_p1,1 conv2d_4_8x9x4_k3x3s2,1_g1_p1,1 conv2d_4_8x9x4_k3x3s2,2_g1_p1,1 conv2d_4_9x8x4_k1x1s1,1_g1_p0,0 conv2d_4_9x8x4_k1x1s2,1_g1_p0,0 conv2d_4_9x8x4_k1x3s1,2_g1_p0,1 conv2d_4_9x8x4_k3x1s1,1_g1_p1,0 conv2d_4_9x8x4_k3x3s1,1_g1_p1,1 conv2d_4_9x8x4_k3x3s2,1_g1_p1,1 conv2d_4_9x8x4_k3x3s2,2_g1_p1,1 conv2d_4_9x8x4_k5x5s1,1_g1_p2,2 conv2d_4_9x8x4_k5x5s2,1_g1_p2,2 conv2d_4_9x9x4_k1x3s1,2_g1_p0,1 repeated_conv slice_pytorch slice_inf_pytorch slice_chw_pytorch prelu_multiple_alpha prelu_single_alpha scatternd_c_pytorch scatternd_hwc_with_conv_pytorch gridsample_bilinear gridsample_nearest gridsample_bilinear_conv gridsample_nearest_conv conv2dt_32_8x8x32_k3,3_s2,2_p1,1_op1,1 conv2dt_32_8x8x32_k4,4_s2,2_p1,1_op0,0 conv2dt_32_8x8x32_k5,5_s2,2_p2,2_op1,1 resize_bilinear_up2_pytorch resize_nearest_up2_pytorch resize_bilinear_up2_16x16x64_pytorch prelu_single_alpha_c32 prelu_multiple_alpha_c32 compare_less compare_greater where_constA_less where_constA_greater where_constB_less where_constB_greater maxpool_k2_s2 maxpool_k3_s3 global_maxpool conv3x1_1x3_s2_g4";
+L="conv2d_4_8x8x4_k1x1s1,1_g1_p0,0 conv2d_4_8x8x4_k1x1s1,1_g4_p0,0 conv2d_4_8x8x4_k1x1s2,1_g1_p0,0 conv2d_4_8x8x4_k1x3s1,2_g1_p0,1 conv2d_4_8x8x4_k3x1s1,1_g1_p1,0 conv2d_4_8x8x4_k3x1s1,1_g4_p1,0 conv2d_4_8x8x4_k3x3s1,1_g1_p1,1 conv2d_4_8x8x4_k3x3s2,1_g1_p1,1 conv2d_4_8x8x4_k3x3s2,2_g1_p1,1 conv2d_4_8x8x4_k5x5s1,1_g1_p2,2 conv2d_4_8x8x4_k5x5s1,1_g4_p2,2 conv2d_4_8x8x4_k5x5s2,1_g1_p2,2 conv2d_4_8x9x4_k1x1s2,1_g1_p0,0 conv2d_4_8x9x4_k3x1s1,1_g4_p1,0 conv2d_4_8x9x4_k3x3s1,1_g4_p1,1 conv2d_4_8x9x4_k3x3s2,1_g1_p1,1 conv2d_4_8x9x4_k3x3s2,2_g1_p1,1 conv2d_4_9x8x4_k1x1s1,1_g1_p0,0 conv2d_4_9x8x4_k1x1s2,1_g1_p0,0 conv2d_4_9x8x4_k1x3s1,2_g1_p0,1 conv2d_4_9x8x4_k3x1s1,1_g1_p1,0 conv2d_4_9x8x4_k3x3s1,1_g1_p1,1 conv2d_4_9x8x4_k3x3s2,1_g1_p1,1 conv2d_4_9x8x4_k3x3s2,2_g1_p1,1 conv2d_4_9x8x4_k5x5s1,1_g1_p2,2 conv2d_4_9x8x4_k5x5s2,1_g1_p2,2 conv2d_4_9x9x4_k1x3s1,2_g1_p0,1 repeated_conv slice_pytorch slice_inf_pytorch slice_chw_pytorch prelu_multiple_alpha prelu_single_alpha scatternd_c_pytorch scatternd_hwc_with_conv_pytorch gridsample_bilinear gridsample_nearest gridsample_bilinear_conv gridsample_nearest_conv conv2dt_32_8x8x32_k3,3_s2,2_p1,1_op1,1 conv2dt_32_8x8x32_k4,4_s2,2_p1,1_op0,0 conv2dt_32_8x8x32_k5,5_s2,2_p2,2_op1,1 resize_bilinear_up2_pytorch resize_nearest_up2_pytorch resize_bilinear_up2_16x16x64_pytorch prelu_single_alpha_c32 prelu_multiple_alpha_c32 compare_less compare_greater where_constA_less where_constA_greater where_constB_less where_constB_greater maxpool_k2_s2 maxpool_k3_s3 global_maxpool conv3x1_1x3_s2_g4 clip";
 for F in $L; do
   ../utest.sh ../models/${F}.onnx;
 done
diff --git a/utests/models/clip.onnx b/utests/models/clip.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..2a352a672525c1e8f31f36f39ae655514031a78c
Binary files /dev/null and b/utests/models/clip.onnx differ