libbnxt_re: Add support for rdma-core v37
The version of rdma-core in StarlingX is based on version 37 provided by
Mellanox's OpenFabrics Enterprise Distribution, and the rdma-core
package's libibverbs has a library ABI version 34. Furthermore,
libbnxt_re's configure.ac script indicates that libbnxt_re is
(currently) compatible with rdma-core versions up to 35.

Despite this, while preparing commit 9baff8aa88 ("Introduce libbnxt_re
version 220.0.5.0"), we were under the impression that, because the
library ABI version is 34 and libbnxt_re compiled without issues, the
library would work fine.

However, during run-time testing with the Linux-RDMA community's
perftest package, we observed that test programs such as "ib_send_bw"
would crash with segmentation faults. Further debugging showed that
libbnxt_re was calling an incorrect function through a function
pointer in an "ops" structure: between rdma-core v35 and v37, a new
function pointer had been inserted into the structure's definition,
which shifted the offsets of the function pointers that follow it. We
also noticed that libbnxt_re's build procedure assumed a build against
rdma-core v35, rather than the v34 we had expected. In conclusion, we
had encountered an ABI incompatibility.
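
To make the failure mode concrete, consider the sketch below. The
structure and member names are hypothetical (the actual table is the
provider "ops" structure shared with libibverbs), but the mechanism is
the same: inserting one function pointer shifts the offset of every
member that follows it, so code compiled against the old layout calls
through the wrong slot at run-time:

    /* Hypothetical names; for illustration only. */
    struct ops_v35 {
            int (*post_send)(void *qp);   /* first slot */
            int (*post_recv)(void *qp);   /* second slot */
    };

    struct ops_v37 {
            int (*advise_mr)(void *pd);   /* new slot inserted in v37 */
            int (*post_send)(void *qp);   /* now the second slot */
            int (*post_recv)(void *qp);   /* now the third slot */
    };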

Note that Broadcom has since released a newer driver and library bundle
(v221.1.28.0), but the newer version of libbnxt_re in that bundle does
not support rdma-core v37 either.

Due to what is discussed above, this commit patches libbnxt_re so that
it is compatible with rdma-core v37. We found that the API changes
between rdma-core v35 and v37 are Mellanox-specific and have no impact
on libbnxt_re. As a result, this commit only imports rdma-core v37.3's
headers and ensures that libbnxt_re can be built against the newer
headers.
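
For illustration, the adjustments described in the imported patch's
message amount to a driver.h fragment like the one below. This is an
assumed sketch, not a quote from the patch hunks: the exact set of
modified #include lines is described in the patch message further
down, and kern-abi.h is used here only as a representative example.

    /* Assumed sketch of the driver.h adjustments. */
    #include "kern-abi.h"   /* was <kern-abi.h>; the quote form
                               resolves to the copy under
                               src/rc-compat/v37 */

    /* Keep the provider library suffix at the existing v34 library
     * ABI even though the headers come from rdma-core v37.3. */
    #define IBV_DEVICE_LIBRARY_EXTENSION rdmav34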

Verification:
- An ISO image was built with this commit using both the incremental
  and the monolithic build procedures.
- The ISO image was installed and bootstrapped in low-latency All-in-One
  simplex mode on a server that has a quad-port Broadcom NetXtreme-E
  57504 network adapter.
- With these changes, the ib_send_bw and ib_send_lat utilities in the
  Linux-RDMA community's perftest package were observed to no longer
  crash with segmentation faults.
- Basic tests were carried out by connecting the third and fourth
  ports of the network adapter with a fiber cable and running traffic
  across the two ports. The test commands were as follows:

    # Server
    sudo ib_send_bw -F -D 10 -d bnxt_re2
    # Client
    sudo ib_send_bw -F -D 10 -d bnxt_re3 10.240.240.12

  According to the output of the following commands, RDMA traffic was
  successfully processed by the aforementioned ports:

    ethtool -S enp101s0f2
    ethtool -S enp101s0f3
    sudo cat /sys/kernel/debug/bnxt_re/bnxt_re{2,3}/info

Story: 2009915
Task: 44916

Change-Id: I79528fb2d04e972dd69750f6bb3d132f9de5ada8
Fixes: 9baff8aa88 ("Introduce libbnxt_re version 220.0.5.0")
Signed-off-by: M. Vefa Bicakci <vefa.bicakci@windriver.com>

From 0ace34eb5bcc23dacc68285cd43b828331b42262 Mon Sep 17 00:00:00 2001
From: "M. Vefa Bicakci" <vefa.bicakci@windriver.com>
Date: Thu, 31 Mar 2022 15:52:34 -0400
Subject: [PATCH] rc-compat: Add headers from v37.3

All of the headers were copied from the rdma-core repository tag
"v37.3". We do not copy from the v36.y tag, because Mellanox's
OpenFabrics Enterprise Distribution (OFED) package in use by StarlingX
is based on rdma-core v37, and we therefore do not need headers from
v36.

This commit is intended to go away when the vendor releases a version
of libbnxt_re with native support for Mellanox OFED's rdma-core v37
baseline.

The only changes to the headers were as follows:
- Some of the #include directives in driver.h were modified to align
  them with their counterparts in the v35 directory, by replacing
  angle-brackets ('<' and '>') with double quotes ('"') so that the
  files in the rc-compat/v37 directory would be used.
- The following line was added to driver.h:
    #define IBV_DEVICE_LIBRARY_EXTENSION rdmav34
- kernel-abi_ib_user_verbs.h was generated using a script in the
  rdma-core repository as follows:
    python3 ../buildlib/make_abi_structs.py \
        ./rdma/ib_user_verbs.h \
        kernel-abi_ib_user_verbs.h
- config.h was adapted from src/rc-compat/v35/config.h in libbnxt_re's
  source code.

Signed-off-by: M. Vefa Bicakci <vefa.bicakci@windriver.com>
---
src/rc-compat/v37/ccan/array_size.h | 26 +
src/rc-compat/v37/ccan/bitmap.h | 239 ++++
src/rc-compat/v37/ccan/build_assert.h | 40 +
src/rc-compat/v37/ccan/check_type.h | 64 +
src/rc-compat/v37/ccan/compiler.h | 230 ++++
src/rc-compat/v37/ccan/container_of.h | 146 ++
src/rc-compat/v37/ccan/ilog.h | 151 ++
src/rc-compat/v37/ccan/list.h | 842 ++++++++++++
src/rc-compat/v37/ccan/minmax.h | 65 +
src/rc-compat/v37/ccan/str.h | 228 +++
src/rc-compat/v37/ccan/str_debug.h | 30 +
src/rc-compat/v37/cmd_ioctl.h | 412 ++++++
src/rc-compat/v37/config.h | 56 +
src/rc-compat/v37/driver.h | 755 ++++++++++
src/rc-compat/v37/ib_user_verbs.h | 1301 ++++++++++++++++++
src/rc-compat/v37/kern-abi.h | 322 +++++
src/rc-compat/v37/kernel-abi_ib_user_verbs.h | 1114 +++++++++++++++
src/rc-compat/v37/rdma_user_ioctl_cmds.h | 87 ++
src/rc-compat/v37/util/cl_qmap.h | 970 +++++++++++++
src/rc-compat/v37/util/compiler.h | 54 +
src/rc-compat/v37/util/mmio.h | 267 ++++
src/rc-compat/v37/util/node_name_map.h | 19 +
src/rc-compat/v37/util/rdma_nl.h | 52 +
src/rc-compat/v37/util/symver.h | 107 ++
src/rc-compat/v37/util/udma_barrier.h | 267 ++++
src/rc-compat/v37/util/util.h | 93 ++
26 files changed, 7937 insertions(+)
create mode 100644 src/rc-compat/v37/ccan/array_size.h
create mode 100644 src/rc-compat/v37/ccan/bitmap.h
create mode 100644 src/rc-compat/v37/ccan/build_assert.h
create mode 100644 src/rc-compat/v37/ccan/check_type.h
create mode 100644 src/rc-compat/v37/ccan/compiler.h
create mode 100644 src/rc-compat/v37/ccan/container_of.h
create mode 100644 src/rc-compat/v37/ccan/ilog.h
create mode 100644 src/rc-compat/v37/ccan/list.h
create mode 100644 src/rc-compat/v37/ccan/minmax.h
create mode 100644 src/rc-compat/v37/ccan/str.h
create mode 100644 src/rc-compat/v37/ccan/str_debug.h
create mode 100644 src/rc-compat/v37/cmd_ioctl.h
create mode 100644 src/rc-compat/v37/config.h
create mode 100644 src/rc-compat/v37/driver.h
create mode 100644 src/rc-compat/v37/ib_user_verbs.h
create mode 100644 src/rc-compat/v37/kern-abi.h
create mode 100644 src/rc-compat/v37/kernel-abi_ib_user_verbs.h
create mode 100644 src/rc-compat/v37/rdma_user_ioctl_cmds.h
create mode 100644 src/rc-compat/v37/util/cl_qmap.h
create mode 100644 src/rc-compat/v37/util/compiler.h
create mode 100644 src/rc-compat/v37/util/mmio.h
create mode 100644 src/rc-compat/v37/util/node_name_map.h
create mode 100644 src/rc-compat/v37/util/rdma_nl.h
create mode 100644 src/rc-compat/v37/util/symver.h
create mode 100644 src/rc-compat/v37/util/udma_barrier.h
create mode 100644 src/rc-compat/v37/util/util.h
diff --git a/src/rc-compat/v37/ccan/array_size.h b/src/rc-compat/v37/ccan/array_size.h
new file mode 100644
index 000000000000..37b200f5e239
--- /dev/null
+++ b/src/rc-compat/v37/ccan/array_size.h
@@ -0,0 +1,26 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_ARRAY_SIZE_H
+#define CCAN_ARRAY_SIZE_H
+#include "config.h"
+#include <ccan/build_assert.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in a visible array
+ * @arr: the array whose size you want.
+ *
+ * This does not work on pointers, or arrays declared as [], or
+ * function parameters. With correct compiler support, such usage
+ * will cause a build error (see build_assert).
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + _array_size_chk(arr))
+
+#if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF
+/* Two gcc extensions.
+ * &a[0] degrades to a pointer: a different type from an array */
+#define _array_size_chk(arr) \
+ BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(arr), \
+ typeof(&(arr)[0])))
+#else
+#define _array_size_chk(arr) 0
+#endif
+#endif /* CCAN_ALIGNOF_H */
diff --git a/src/rc-compat/v37/ccan/bitmap.h b/src/rc-compat/v37/ccan/bitmap.h
new file mode 100644
index 000000000000..ff0b8c83da46
--- /dev/null
+++ b/src/rc-compat/v37/ccan/bitmap.h
@@ -0,0 +1,239 @@
+/* Licensed under LGPLv2+ - see LICENSE file for details */
+#ifndef CCAN_BITMAP_H_
+#define CCAN_BITMAP_H_
+
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+typedef unsigned long bitmap_word;
+
+#define BITMAP_WORD_BITS (sizeof(bitmap_word) * CHAR_BIT)
+#define BITMAP_NWORDS(_n) \
+ (((_n) + BITMAP_WORD_BITS - 1) / BITMAP_WORD_BITS)
+
+/*
+ * We wrap each word in a structure for type checking.
+ */
+typedef struct {
+ bitmap_word w;
+} bitmap;
+
+#define BITMAP_DECLARE(_name, _nbits) \
+ bitmap (_name)[BITMAP_NWORDS(_nbits)]
+
+static inline size_t bitmap_sizeof(unsigned long nbits)
+{
+ return BITMAP_NWORDS(nbits) * sizeof(bitmap_word);
+}
+
+static inline bitmap_word bitmap_bswap(bitmap_word w)
+{
+ /* We do not need to have the bitmap in any specific endianness */
+ return w;
+}
+
+#define BITMAP_WORD(_bm, _n) ((_bm)[(_n) / BITMAP_WORD_BITS].w)
+#define BITMAP_WORDBIT(_n) \
+ (bitmap_bswap(1UL << (BITMAP_WORD_BITS - ((_n) % BITMAP_WORD_BITS) - 1)))
+
+#define BITMAP_HEADWORDS(_nbits) \
+ ((_nbits) / BITMAP_WORD_BITS)
+#define BITMAP_HEADBYTES(_nbits) \
+ (BITMAP_HEADWORDS(_nbits) * sizeof(bitmap_word))
+
+#define BITMAP_TAILWORD(_bm, _nbits) \
+ ((_bm)[BITMAP_HEADWORDS(_nbits)].w)
+#define BITMAP_HASTAIL(_nbits) (((_nbits) % BITMAP_WORD_BITS) != 0)
+#define BITMAP_TAILBITS(_nbits) \
+ (bitmap_bswap(~(-1UL >> ((_nbits) % BITMAP_WORD_BITS))))
+#define BITMAP_TAIL(_bm, _nbits) \
+ (BITMAP_TAILWORD(_bm, _nbits) & BITMAP_TAILBITS(_nbits))
+
+static inline void bitmap_set_bit(bitmap *bmap, unsigned long n)
+{
+ BITMAP_WORD(bmap, n) |= BITMAP_WORDBIT(n);
+}
+
+static inline void bitmap_clear_bit(bitmap *bmap, unsigned long n)
+{
+ BITMAP_WORD(bmap, n) &= ~BITMAP_WORDBIT(n);
+}
+
+static inline void bitmap_change_bit(bitmap *bmap, unsigned long n)
+{
+ BITMAP_WORD(bmap, n) ^= BITMAP_WORDBIT(n);
+}
+
+static inline bool bitmap_test_bit(const bitmap *bmap, unsigned long n)
+{
+ return !!(BITMAP_WORD(bmap, n) & BITMAP_WORDBIT(n));
+}
+
+void bitmap_zero_range(bitmap *bmap, unsigned long n, unsigned long m);
+void bitmap_fill_range(bitmap *bmap, unsigned long n, unsigned long m);
+
+static inline void bitmap_zero(bitmap *bmap, unsigned long nbits)
+{
+ memset(bmap, 0, bitmap_sizeof(nbits));
+}
+
+static inline void bitmap_fill(bitmap *bmap, unsigned long nbits)
+{
+ memset(bmap, 0xff, bitmap_sizeof(nbits));
+}
+
+static inline void bitmap_copy(bitmap *dst, const bitmap *src,
+ unsigned long nbits)
+{
+ memcpy(dst, src, bitmap_sizeof(nbits));
+}
+
+#define BITMAP_DEF_BINOP(_name, _op) \
+ static inline void bitmap_##_name(bitmap *dst, bitmap *src1, bitmap *src2, \
+ unsigned long nbits) \
+ { \
+ unsigned long i = 0; \
+ for (i = 0; i < BITMAP_NWORDS(nbits); i++) { \
+ dst[i].w = src1[i].w _op src2[i].w; \
+ } \
+ }
+
+BITMAP_DEF_BINOP(and, &)
+BITMAP_DEF_BINOP(or, |)
+BITMAP_DEF_BINOP(xor, ^)
+BITMAP_DEF_BINOP(andnot, & ~)
+
+#undef BITMAP_DEF_BINOP
+
+static inline void bitmap_complement(bitmap *dst, const bitmap *src,
+ unsigned long nbits)
+{
+ unsigned long i;
+
+ for (i = 0; i < BITMAP_NWORDS(nbits); i++)
+ dst[i].w = ~src[i].w;
+}
+
+static inline bool bitmap_equal(const bitmap *src1, const bitmap *src2,
+ unsigned long nbits)
+{
+ return (memcmp(src1, src2, BITMAP_HEADBYTES(nbits)) == 0)
+ && (!BITMAP_HASTAIL(nbits)
+ || (BITMAP_TAIL(src1, nbits) == BITMAP_TAIL(src2, nbits)));
+}
+
+static inline bool bitmap_intersects(const bitmap *src1, const bitmap *src2,
+ unsigned long nbits)
+{
+ unsigned long i;
+
+ for (i = 0; i < BITMAP_HEADWORDS(nbits); i++) {
+ if (src1[i].w & src2[i].w)
+ return true;
+ }
+ if (BITMAP_HASTAIL(nbits) &&
+ (BITMAP_TAIL(src1, nbits) & BITMAP_TAIL(src2, nbits)))
+ return true;
+ return false;
+}
+
+static inline bool bitmap_subset(const bitmap *src1, const bitmap *src2,
+ unsigned long nbits)
+{
+ unsigned long i;
+
+ for (i = 0; i < BITMAP_HEADWORDS(nbits); i++) {
+ if (src1[i].w & ~src2[i].w)
+ return false;
+ }
+ if (BITMAP_HASTAIL(nbits) &&
+ (BITMAP_TAIL(src1, nbits) & ~BITMAP_TAIL(src2, nbits)))
+ return false;
+ return true;
+}
+
+static inline bool bitmap_full(const bitmap *bmap, unsigned long nbits)
+{
+ unsigned long i;
+
+ for (i = 0; i < BITMAP_HEADWORDS(nbits); i++) {
+ if (bmap[i].w != -1UL)
+ return false;
+ }
+ if (BITMAP_HASTAIL(nbits) &&
+ (BITMAP_TAIL(bmap, nbits) != BITMAP_TAILBITS(nbits)))
+ return false;
+
+ return true;
+}
+
+static inline bool bitmap_empty(const bitmap *bmap, unsigned long nbits)
+{
+ unsigned long i;
+
+ for (i = 0; i < BITMAP_HEADWORDS(nbits); i++) {
+ if (bmap[i].w != 0)
+ return false;
+ }
+ if (BITMAP_HASTAIL(nbits) && (BITMAP_TAIL(bmap, nbits) != 0))
+ return false;
+
+ return true;
+}
+
+unsigned long bitmap_ffs(const bitmap *bmap,
+ unsigned long n, unsigned long m);
+
+/*
+ * Allocation functions
+ */
+static inline bitmap *bitmap_alloc(unsigned long nbits)
+{
+ return malloc(bitmap_sizeof(nbits));
+}
+
+static inline bitmap *bitmap_alloc0(unsigned long nbits)
+{
+ bitmap *bmap;
+
+ bmap = bitmap_alloc(nbits);
+ if (bmap)
+ bitmap_zero(bmap, nbits);
+ return bmap;
+}
+
+static inline bitmap *bitmap_alloc1(unsigned long nbits)
+{
+ bitmap *bmap;
+
+ bmap = bitmap_alloc(nbits);
+ if (bmap)
+ bitmap_fill(bmap, nbits);
+ return bmap;
+}
+
+static inline bitmap *bitmap_realloc0(bitmap *bmap, unsigned long obits,
+ unsigned long nbits)
+{
+ bmap = realloc(bmap, bitmap_sizeof(nbits));
+
+ if ((nbits > obits) && bmap)
+ bitmap_zero_range(bmap, obits, nbits);
+
+ return bmap;
+}
+
+static inline bitmap *bitmap_realloc1(bitmap *bmap, unsigned long obits,
+ unsigned long nbits)
+{
+ bmap = realloc(bmap, bitmap_sizeof(nbits));
+
+ if ((nbits > obits) && bmap)
+ bitmap_fill_range(bmap, obits, nbits);
+
+ return bmap;
+}
+
+#endif /* CCAN_BITMAP_H_ */
diff --git a/src/rc-compat/v37/ccan/build_assert.h b/src/rc-compat/v37/ccan/build_assert.h
new file mode 100644
index 000000000000..0ecd7ff36633
--- /dev/null
+++ b/src/rc-compat/v37/ccan/build_assert.h
@@ -0,0 +1,40 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_BUILD_ASSERT_H
+#define CCAN_BUILD_ASSERT_H
+
+/**
+ * BUILD_ASSERT - assert a build-time dependency.
+ * @cond: the compile-time condition which must be true.
+ *
+ * Your compile will fail if the condition isn't true, or can't be evaluated
+ * by the compiler. This can only be used within a function.
+ *
+ * Example:
+ * #include <stddef.h>
+ * ...
+ * static char *foo_to_char(struct foo *foo)
+ * {
+ * // This code needs string to be at start of foo.
+ * BUILD_ASSERT(offsetof(struct foo, string) == 0);
+ * return (char *)foo;
+ * }
+ */
+#define BUILD_ASSERT(cond) \
+ do { (void) sizeof(char [1 - 2*!(cond)]); } while(0)
+
+/**
+ * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
+ * @cond: the compile-time condition which must be true.
+ *
+ * Your compile will fail if the condition isn't true, or can't be evaluated
+ * by the compiler. This can be used in an expression: its value is "0".
+ *
+ * Example:
+ * #define foo_to_char(foo) \
+ * ((char *)(foo) \
+ * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
+ */
+#define BUILD_ASSERT_OR_ZERO(cond) \
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+
+#endif /* CCAN_BUILD_ASSERT_H */
diff --git a/src/rc-compat/v37/ccan/check_type.h b/src/rc-compat/v37/ccan/check_type.h
new file mode 100644
index 000000000000..a576a5018e01
--- /dev/null
+++ b/src/rc-compat/v37/ccan/check_type.h
@@ -0,0 +1,64 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_CHECK_TYPE_H
+#define CCAN_CHECK_TYPE_H
+#include "config.h"
+
+/**
+ * check_type - issue a warning or build failure if type is not correct.
+ * @expr: the expression whose type we should check (not evaluated).
+ * @type: the exact type we expect the expression to be.
+ *
+ * This macro is usually used within other macros to try to ensure that a macro
+ * argument is of the expected type. No type promotion of the expression is
+ * done: an unsigned int is not the same as an int!
+ *
+ * check_type() always evaluates to 0.
+ *
+ * If your compiler does not support typeof, then the best we can do is fail
+ * to compile if the sizes of the types are unequal (a less complete check).
+ *
+ * Example:
+ * // They should always pass a 64-bit value to _set_some_value!
+ * #define set_some_value(expr) \
+ * _set_some_value((check_type((expr), uint64_t), (expr)))
+ */
+
+/**
+ * check_types_match - issue a warning or build failure if types are not same.
+ * @expr1: the first expression (not evaluated).
+ * @expr2: the second expression (not evaluated).
+ *
+ * This macro is usually used within other macros to try to ensure that
+ * arguments are of identical types. No type promotion of the expressions is
+ * done: an unsigned int is not the same as an int!
+ *
+ * check_types_match() always evaluates to 0.
+ *
+ * If your compiler does not support typeof, then the best we can do is fail
+ * to compile if the sizes of the types are unequal (a less complete check).
+ *
+ * Example:
+ * // Do subtraction to get to enclosing type, but make sure that
+ * // pointer is of correct type for that member.
+ * #define container_of(mbr_ptr, encl_type, mbr) \
+ * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
+ * ((encl_type *) \
+ * ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr))))
+ */
+#if HAVE_TYPEOF
+#define check_type(expr, type) \
+ ((typeof(expr) *)0 != (type *)0)
+
+#define check_types_match(expr1, expr2) \
+ ((typeof(expr1) *)0 != (typeof(expr2) *)0)
+#else
+#include <ccan/build_assert.h>
+/* Without typeof, we can only test the sizes. */
+#define check_type(expr, type) \
+ BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
+
+#define check_types_match(expr1, expr2) \
+ BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
+#endif /* HAVE_TYPEOF */
+
+#endif /* CCAN_CHECK_TYPE_H */
diff --git a/src/rc-compat/v37/ccan/compiler.h b/src/rc-compat/v37/ccan/compiler.h
new file mode 100644
index 000000000000..cc0d4d1af2ca
--- /dev/null
+++ b/src/rc-compat/v37/ccan/compiler.h
@@ -0,0 +1,230 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_COMPILER_H
+#define CCAN_COMPILER_H
+#include "config.h"
+
+#ifndef COLD
+/**
+ * COLD - a function is unlikely to be called.
+ *
+ * Used to mark an unlikely code path and optimize appropriately.
+ * It is usually used on logging or error routines.
+ *
+ * Example:
+ * static void COLD moan(const char *reason)
+ * {
+ * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno));
+ * }
+ */
+#define COLD __attribute__((__cold__))
+#endif
+
+#ifndef NORETURN
+/**
+ * NORETURN - a function does not return
+ *
+ * Used to mark a function which exits; useful for suppressing warnings.
+ *
+ * Example:
+ * static void NORETURN fail(const char *reason)
+ * {
+ * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno));
+ * exit(1);
+ * }
+ */
+#define NORETURN __attribute__((__noreturn__))
+#endif
+
+#ifndef PRINTF_FMT
+/**
+ * PRINTF_FMT - a function takes printf-style arguments
+ * @nfmt: the 1-based number of the function's format argument.
+ * @narg: the 1-based number of the function's first variable argument.
+ *
+ * This allows the compiler to check your parameters as it does for printf().
+ *
+ * Example:
+ * void PRINTF_FMT(2,3) my_printf(const char *prefix, const char *fmt, ...);
+ */
+#define PRINTF_FMT(nfmt, narg) \
+ __attribute__((format(__printf__, nfmt, narg)))
+#endif
+
+#ifndef CONST_FUNCTION
+/**
+ * CONST_FUNCTION - a function's return depends only on its argument
+ *
+ * This allows the compiler to assume that the function will return the exact
+ * same value for the exact same arguments. This implies that the function
+ * must not use global variables, or dereference pointer arguments.
+ */
+#define CONST_FUNCTION __attribute__((__const__))
+
+#ifndef PURE_FUNCTION
+/**
+ * PURE_FUNCTION - a function is pure
+ *
+ * A pure function is one that has no side effects other than it's return value
+ * and uses no inputs other than it's arguments and global variables.
+ */
+#define PURE_FUNCTION __attribute__((__pure__))
+#endif
+#endif
+
+#ifndef UNNEEDED
+/**
+ * UNNEEDED - a variable/function may not be needed
+ *
+ * This suppresses warnings about unused variables or functions, but tells
+ * the compiler that if it is unused it need not emit it into the source code.
+ *
+ * Example:
+ * // With some preprocessor options, this is unnecessary.
+ * static UNNEEDED int counter;
+ *
+ * // With some preprocessor options, this is unnecessary.
+ * static UNNEEDED void add_to_counter(int add)
+ * {
+ * counter += add;
+ * }
+ */
+#define UNNEEDED __attribute__((__unused__))
+#endif
+
+#ifndef NEEDED
+/**
+ * NEEDED - a variable/function is needed
+ *
+ * This suppresses warnings about unused variables or functions, but tells
+ * the compiler that it must exist even if it (seems) unused.
+ *
+ * Example:
+ * // Even if this is unused, these are vital for debugging.
+ * static NEEDED int counter;
+ * static NEEDED void dump_counter(void)
+ * {
+ * printf("Counter is %i\n", counter);
+ * }
+ */
+#define NEEDED __attribute__((__used__))
+#endif
+
+#ifndef UNUSED
+/**
+ * UNUSED - a parameter is unused
+ *
+ * Some compilers (eg. gcc with -W or -Wunused) warn about unused
+ * function parameters. This suppresses such warnings and indicates
+ * to the reader that it's deliberate.
+ *
+ * Example:
+ * // This is used as a callback, so needs to have this prototype.
+ * static int some_callback(void *unused UNUSED)
+ * {
+ * return 0;
+ * }
+ */
+#define UNUSED __attribute__((__unused__))
+#endif
+
+#ifndef IS_COMPILE_CONSTANT
+/**
+ * IS_COMPILE_CONSTANT - does the compiler know the value of this expression?
+ * @expr: the expression to evaluate
+ *
+ * When an expression manipulation is complicated, it is usually better to
+ * implement it in a function. However, if the expression being manipulated is
+ * known at compile time, it is better to have the compiler see the entire
+ * expression so it can simply substitute the result.
+ *
+ * This can be done using the IS_COMPILE_CONSTANT() macro.
+ *
+ * Example:
+ * enum greek { ALPHA, BETA, GAMMA, DELTA, EPSILON };
+ *
+ * // Out-of-line version.
+ * const char *greek_name(enum greek greek);
+ *
+ * // Inline version.
+ * static inline const char *_greek_name(enum greek greek)
+ * {
+ * switch (greek) {
+ * case ALPHA: return "alpha";
+ * case BETA: return "beta";
+ * case GAMMA: return "gamma";
+ * case DELTA: return "delta";
+ * case EPSILON: return "epsilon";
+ * default: return "**INVALID**";
+ * }
+ * }
+ *
+ * // Use inline if compiler knows answer. Otherwise call function
+ * // to avoid copies of the same code everywhere.
+ * #define greek_name(g) \
+ * (IS_COMPILE_CONSTANT(greek) ? _greek_name(g) : greek_name(g))
+ */
+#define IS_COMPILE_CONSTANT(expr) __builtin_constant_p(expr)
+#endif
+
+#ifndef WARN_UNUSED_RESULT
+/**
+ * WARN_UNUSED_RESULT - warn if a function return value is unused.
+ *
+ * Used to mark a function where it is extremely unlikely that the caller
+ * can ignore the result, eg realloc().
+ *
+ * Example:
+ * // buf param may be freed by this; need return value!
+ * static char *WARN_UNUSED_RESULT enlarge(char *buf, unsigned *size)
+ * {
+ * return realloc(buf, (*size) *= 2);
+ * }
+ */
+#define WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
+#endif
+
+
+/**
+ * WARN_DEPRECATED - warn that a function/type/variable is deprecated when used.
+ *
+ * Used to mark a function, type or variable should not be used.
+ *
+ * Example:
+ * WARN_DEPRECATED char *oldfunc(char *buf);
+ */
+#define WARN_DEPRECATED __attribute__((__deprecated__))
+
+
+/**
+ * NO_NULL_ARGS - specify that no arguments to this function can be NULL.
+ *
+ * The compiler will warn if any pointer args are NULL.
+ *
+ * Example:
+ * NO_NULL_ARGS char *my_copy(char *buf);
+ */
+#define NO_NULL_ARGS __attribute__((__nonnull__))
+
+/**
+ * NON_NULL_ARGS - specify that some arguments to this function can't be NULL.
+ * @...: 1-based argument numbers for which args can't be NULL.
+ *
+ * The compiler will warn if any of the specified pointer args are NULL.
+ *
+ * Example:
+ * char *my_copy2(char *buf, char *maybenull) NON_NULL_ARGS(1);
+ */
+#define NON_NULL_ARGS(...) __attribute__((__nonnull__(__VA_ARGS__)))
+
+
+/**
+ * LAST_ARG_NULL - specify the last argument of a variadic function must be NULL.
+ *
+ * The compiler will warn if the last argument isn't NULL.
+ *
+ * Example:
+ * char *join_string(char *buf, ...) LAST_ARG_NULL;
+ */
+#define LAST_ARG_NULL __attribute__((__sentinel__))
+
+#endif /* CCAN_COMPILER_H */
diff --git a/src/rc-compat/v37/ccan/container_of.h b/src/rc-compat/v37/ccan/container_of.h
new file mode 100644
index 000000000000..9180f37f0d15
--- /dev/null
+++ b/src/rc-compat/v37/ccan/container_of.h
@@ -0,0 +1,146 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_CONTAINER_OF_H
+#define CCAN_CONTAINER_OF_H
+#include <stddef.h>
+
+#include "config.h"
+#include <ccan/check_type.h>
+
+/**
+ * container_of - get pointer to enclosing structure
+ * @member_ptr: pointer to the structure member
+ * @containing_type: the type this member is within
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does pointer
+ * subtraction to return the pointer to the enclosing type.
+ *
+ * Example:
+ * struct foo {
+ * int fielda, fieldb;
+ * // ...
+ * };
+ * struct info {
+ * int some_other_field;
+ * struct foo my_foo;
+ * };
+ *
+ * static struct info *foo_to_info(struct foo *foo)
+ * {
+ * return container_of(foo, struct info, my_foo);
+ * }
+ */
+#ifndef container_of
+#define container_of(member_ptr, containing_type, member) \
+ ((containing_type *) \
+ ((char *)(member_ptr) \
+ - container_off(containing_type, member)) \
+ + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+#endif
+
+/**
+ * container_of_or_null - get pointer to enclosing structure, or NULL
+ * @member_ptr: pointer to the structure member
+ * @containing_type: the type this member is within
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does pointer
+ * subtraction to return the pointer to the enclosing type, unless it
+ * is given NULL, in which case it also returns NULL.
+ *
+ * Example:
+ * struct foo {
+ * int fielda, fieldb;
+ * // ...
+ * };
+ * struct info {
+ * int some_other_field;
+ * struct foo my_foo;
+ * };
+ *
+ * static struct info *foo_to_info_allowing_null(struct foo *foo)
+ * {
+ * return container_of_or_null(foo, struct info, my_foo);
+ * }
+ */
+static inline char *container_of_or_null_(void *member_ptr, size_t offset)
+{
+ return member_ptr ? (char *)member_ptr - offset : NULL;
+}
+#define container_of_or_null(member_ptr, containing_type, member) \
+ ((containing_type *) \
+ container_of_or_null_(member_ptr, \
+ container_off(containing_type, member)) \
+ + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+
+/**
+ * container_off - get offset to enclosing structure
+ * @containing_type: the type this member is within
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does
+ * typechecking and figures out the offset to the enclosing type.
+ *
+ * Example:
+ * struct foo {
+ * int fielda, fieldb;
+ * // ...
+ * };
+ * struct info {
+ * int some_other_field;
+ * struct foo my_foo;
+ * };
+ *
+ * static struct info *foo_to_info(struct foo *foo)
+ * {
+ * size_t off = container_off(struct info, my_foo);
+ * return (void *)((char *)foo - off);
+ * }
+ */
+#define container_off(containing_type, member) \
+ offsetof(containing_type, member)
+
+/**
+ * container_of_var - get pointer to enclosing structure using a variable
+ * @member_ptr: pointer to the structure member
+ * @container_var: a pointer of same type as this member's container
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does pointer
+ * subtraction to return the pointer to the enclosing type.
+ *
+ * Example:
+ * static struct info *foo_to_i(struct foo *foo)
+ * {
+ * struct info *i = container_of_var(foo, i, my_foo);
+ * return i;
+ * }
+ */
+#if HAVE_TYPEOF
+#define container_of_var(member_ptr, container_var, member) \
+ container_of(member_ptr, typeof(*container_var), member)
+#else
+#define container_of_var(member_ptr, container_var, member) \
+ ((void *)((char *)(member_ptr) - \
+ container_off_var(container_var, member)))
+#endif
+
+/**
+ * container_off_var - get offset of a field in enclosing structure
+ * @container_var: a pointer to a container structure
+ * @member: the name of a member within the structure.
+ *
+ * Given (any) pointer to a structure and a its member name, this
+ * macro does pointer subtraction to return offset of member in a
+ * structure memory layout.
+ *
+ */
+#if HAVE_TYPEOF
+#define container_off_var(var, member) \
+ container_off(typeof(*var), member)
+#else
+#define container_off_var(var, member) \
+ ((const char *)&(var)->member - (const char *)(var))
+#endif
+
+#endif /* CCAN_CONTAINER_OF_H */
diff --git a/src/rc-compat/v37/ccan/ilog.h b/src/rc-compat/v37/ccan/ilog.h
new file mode 100644
index 000000000000..2793a7056afe
--- /dev/null
+++ b/src/rc-compat/v37/ccan/ilog.h
@@ -0,0 +1,151 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#if !defined(_ilog_H)
+# define _ilog_H (1)
+# include "config.h"
+# include <stdint.h>
+# include <limits.h>
+# include <ccan/compiler.h>
+
+/**
+ * ilog32 - Integer binary logarithm of a 32-bit value.
+ * @_v: A 32-bit value.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ *
+ * See Also:
+ * ilog32_nz(), ilog64()
+ *
+ * Example:
+ * // Rounds up to next power of 2 (if not a power of 2).
+ * static uint32_t round_up32(uint32_t i)
+ * {
+ * assert(i != 0);
+ * return 1U << ilog32(i-1);
+ * }
+ */
+int ilog32(uint32_t _v);
+
+/**
+ * ilog32_nz - Integer binary logarithm of a non-zero 32-bit value.
+ * @_v: A 32-bit value.
+ * Returns floor(log2(_v))+1, or undefined if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog32(), ilog64_nz()
+ * Example:
+ * // Find Last Set (ie. highest bit set, 0 to 31).
+ * static uint32_t fls32(uint32_t i)
+ * {
+ * assert(i != 0);
+ * return ilog32_nz(i) - 1;
+ * }
+ */
+int ilog32_nz(uint32_t _v);
+
+/**
+ * ilog64 - Integer binary logarithm of a 64-bit value.
+ * @_v: A 64-bit value.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog64_nz(), ilog32()
+ */
+int ilog64(uint64_t _v);
+
+/**
+ * ilog64_nz - Integer binary logarithm of a non-zero 64-bit value.
+ * @_v: A 64-bit value.
+ * Returns floor(log2(_v))+1, or undefined if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog64(), ilog32_nz()
+ */
+int ilog64_nz(uint64_t _v);
+
+/**
+ * STATIC_ILOG_32 - The integer logarithm of an (unsigned, 32-bit) constant.
+ * @_v: A non-negative 32-bit constant.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * This macro should only be used when you need a compile-time constant,
+ * otherwise ilog32 or ilog32_nz are just as fast and more flexible.
+ *
+ * Example:
+ * #define MY_PAGE_SIZE 4096
+ * #define MY_PAGE_BITS (STATIC_ILOG_32(PAGE_SIZE) - 1)
+ */
+#define STATIC_ILOG_32(_v) (STATIC_ILOG5((uint32_t)(_v)))
+
+/**
+ * STATIC_ILOG_64 - The integer logarithm of an (unsigned, 64-bit) constant.
+ * @_v: A non-negative 64-bit constant.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * This macro should only be used when you need a compile-time constant,
+ * otherwise ilog64 or ilog64_nz are just as fast and more flexible.
+ */
+#define STATIC_ILOG_64(_v) (STATIC_ILOG6((uint64_t)(_v)))
+
+/* Private implementation details */
+
+/*Note the casts to (int) below: this prevents "upgrading"
+ the type of an entire expression to an (unsigned) size_t.*/
+#if INT_MAX>=2147483647 && HAVE_BUILTIN_CLZ
+#define builtin_ilog32_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v))
+#elif LONG_MAX>=2147483647L && HAVE_BUILTIN_CLZL
+#define builtin_ilog32_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clzl(v))
+#endif
+
+#if INT_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZ
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v))
+#elif LONG_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZL
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned long)*CHAR_BIT) - __builtin_clzl(v))
+#elif HAVE_BUILTIN_CLZLL
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned long long)*CHAR_BIT) - __builtin_clzll(v))
+#endif
+
+#ifdef builtin_ilog32_nz
+#define ilog32(_v) (builtin_ilog32_nz(_v)&-!!(_v))
+#define ilog32_nz(_v) builtin_ilog32_nz(_v)
+#else
+#define ilog32_nz(_v) ilog32(_v)
+#define ilog32(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_32(_v) : ilog32(_v))
+#endif /* builtin_ilog32_nz */
+
+#ifdef builtin_ilog64_nz
+#define ilog64(_v) (builtin_ilog64_nz(_v)&-!!(_v))
+#define ilog64_nz(_v) builtin_ilog64_nz(_v)
+#else
+#define ilog64_nz(_v) ilog64(_v)
+#define ilog64(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_64(_v) : ilog64(_v))
+#endif /* builtin_ilog64_nz */
+
+/* Macros for evaluating compile-time constant ilog. */
+# define STATIC_ILOG0(_v) (!!(_v))
+# define STATIC_ILOG1(_v) (((_v)&0x2)?2:STATIC_ILOG0(_v))
+# define STATIC_ILOG2(_v) (((_v)&0xC)?2+STATIC_ILOG1((_v)>>2):STATIC_ILOG1(_v))
+# define STATIC_ILOG3(_v) \
+ (((_v)&0xF0)?4+STATIC_ILOG2((_v)>>4):STATIC_ILOG2(_v))
+# define STATIC_ILOG4(_v) \
+ (((_v)&0xFF00)?8+STATIC_ILOG3((_v)>>8):STATIC_ILOG3(_v))
+# define STATIC_ILOG5(_v) \
+ (((_v)&0xFFFF0000)?16+STATIC_ILOG4((_v)>>16):STATIC_ILOG4(_v))
+# define STATIC_ILOG6(_v) \
+ (((_v)&0xFFFFFFFF00000000ULL)?32+STATIC_ILOG5((_v)>>32):STATIC_ILOG5(_v))
+
+#endif /* _ilog_H */
diff --git a/src/rc-compat/v37/ccan/list.h b/src/rc-compat/v37/ccan/list.h
new file mode 100644
index 000000000000..f4006660f7ef
--- /dev/null
+++ b/src/rc-compat/v37/ccan/list.h
@@ -0,0 +1,842 @@
+/* Licensed under MIT - see LICENSE.MIT file for details */
+#ifndef CCAN_LIST_H
+#define CCAN_LIST_H
+//#define CCAN_LIST_DEBUG 1
+#include <stdbool.h>
+#include <assert.h>
+#include <ccan/str.h>
+#include <ccan/container_of.h>
+#include <ccan/check_type.h>
+
+/**
+ * struct list_node - an entry in a doubly-linked list
+ * @next: next entry (self if empty)
+ * @prev: previous entry (self if empty)
+ *
+ * This is used as an entry in a linked list.
+ * Example:
+ * struct child {
+ * const char *name;
+ * // Linked list of all us children.
+ * struct list_node list;
+ * };
+ */
+struct list_node
+{
+ struct list_node *next, *prev;
+};
+
+/**
+ * struct list_head - the head of a doubly-linked list
+ * @h: the list_head (containing next and prev pointers)
+ *
+ * This is used as the head of a linked list.
+ * Example:
+ * struct parent {
+ * const char *name;
+ * struct list_head children;
+ * unsigned int num_children;
+ * };
+ */
+struct list_head
+{
+ struct list_node n;
+};
+
+/**
+ * list_check - check head of a list for consistency
+ * @h: the list_head
+ * @abortstr: the location to print on aborting, or NULL.
+ *
+ * Because list_nodes have redundant information, consistency checking between
+ * the back and forward links can be done. This is useful as a debugging check.
+ * If @abortstr is non-NULL, that will be printed in a diagnostic if the list
+ * is inconsistent, and the function will abort.
+ *
+ * Returns the list head if the list is consistent, NULL if not (it
+ * can never return NULL if @abortstr is set).
+ *
+ * See also: list_check_node()
+ *
+ * Example:
+ * static void dump_parent(struct parent *p)
+ * {
+ * struct child *c;
+ *
+ * printf("%s (%u children):\n", p->name, p->num_children);
+ * list_check(&p->children, "bad child list");
+ * list_for_each(&p->children, c, list)
+ * printf(" -> %s\n", c->name);
+ * }
+ */
+struct list_head *list_check(const struct list_head *h, const char *abortstr);
+
+/**
+ * list_check_node - check node of a list for consistency
+ * @n: the list_node
+ * @abortstr: the location to print on aborting, or NULL.
+ *
+ * Check consistency of the list node is in (it must be in one).
+ *
+ * See also: list_check()
+ *
+ * Example:
+ * static void dump_child(const struct child *c)
+ * {
+ * list_check_node(&c->list, "bad child list");
+ * printf("%s\n", c->name);
+ * }
+ */
+struct list_node *list_check_node(const struct list_node *n,
+ const char *abortstr);
+
+#define LIST_LOC __FILE__ ":" stringify(__LINE__)
+#ifdef CCAN_LIST_DEBUG
+#define list_debug(h, loc) list_check((h), loc)
+#define list_debug_node(n, loc) list_check_node((n), loc)
+#else
+#define list_debug(h, loc) ((void)loc, h)
+#define list_debug_node(n, loc) ((void)loc, n)
+#endif
+
+/**
+ * LIST_HEAD_INIT - initializer for an empty list_head
+ * @name: the name of the list.
+ *
+ * Explicit initializer for an empty list.
+ *
+ * See also:
+ * LIST_HEAD, list_head_init()
+ *
+ * Example:
+ * static struct list_head my_list = LIST_HEAD_INIT(my_list);
+ */
+#define LIST_HEAD_INIT(name) { { &(name).n, &(name).n } }
+
+/**
+ * LIST_HEAD - define and initialize an empty list_head
+ * @name: the name of the list.
+ *
+ * The LIST_HEAD macro defines a list_head and initializes it to an empty
+ * list. It can be prepended by "static" to define a static list_head.
+ *
+ * See also:
+ * LIST_HEAD_INIT, list_head_init()
+ *
+ * Example:
+ * static LIST_HEAD(my_global_list);
+ */
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+/**
+ * list_head_init - initialize a list_head
+ * @h: the list_head to set to the empty list
+ *
+ * Example:
+ * ...
+ * struct parent *parent = malloc(sizeof(*parent));
+ *
+ * list_head_init(&parent->children);
+ * parent->num_children = 0;
+ */
+static inline void list_head_init(struct list_head *h)
+{
+ h->n.next = h->n.prev = &h->n;
+}
+
+/**
+ * list_node_init - initialize a list_node
+ * @n: the list_node to link to itself.
+ *
+ * You don't need to use this normally! But it lets you list_del(@n)
+ * safely.
+ */
+static inline void list_node_init(struct list_node *n)
+{
+ n->next = n->prev = n;
+}
+
+/**
+ * list_add_after - add an entry after an existing node in a linked list
+ * @h: the list_head to add the node to (for debugging)
+ * @p: the existing list_node to add the node after
+ * @n: the new list_node to add to the list.
+ *
+ * The existing list_node must already be a member of the list.
+ * The new list_node does not need to be initialized; it will be overwritten.
+ *
+ * Example:
+ * struct child c1, c2, c3;
+ * LIST_HEAD(h);
+ *
+ * list_add_tail(&h, &c1.list);
+ * list_add_tail(&h, &c3.list);
+ * list_add_after(&h, &c1.list, &c2.list);
+ */
+#define list_add_after(h, p, n) list_add_after_(h, p, n, LIST_LOC)
+static inline void list_add_after_(struct list_head *h,
+ struct list_node *p,
+ struct list_node *n,
+ const char *abortstr)
+{
+ n->next = p->next;
+ n->prev = p;
+ p->next->prev = n;
+ p->next = n;
+ (void)list_debug(h, abortstr);
+}
+
+/**
+ * list_add - add an entry at the start of a linked list.
+ * @h: the list_head to add the node to
+ * @n: the list_node to add to the list.
+ *
+ * The list_node does not need to be initialized; it will be overwritten.
+ * Example:
+ * struct child *child = malloc(sizeof(*child));
+ *
+ * child->name = "marvin";
+ * list_add(&parent->children, &child->list);
+ * parent->num_children++;
+ */
+#define list_add(h, n) list_add_(h, n, LIST_LOC)
+static inline void list_add_(struct list_head *h,
+ struct list_node *n,
+ const char *abortstr)
+{
+ list_add_after_(h, &h->n, n, abortstr);
+}
+
+/**
+ * list_add_before - add an entry before an existing node in a linked list
+ * @h: the list_head to add the node to (for debugging)
+ * @p: the existing list_node to add the node before
+ * @n: the new list_node to add to the list.
+ *
+ * The existing list_node must already be a member of the list.
+ * The new list_node does not need to be initialized; it will be overwritten.
+ *
+ * Example:
+ * list_head_init(&h);
+ * list_add_tail(&h, &c1.list);
+ * list_add_tail(&h, &c3.list);
+ * list_add_before(&h, &c3.list, &c2.list);
+ */
+#define list_add_before(h, p, n) list_add_before_(h, p, n, LIST_LOC)
+static inline void list_add_before_(struct list_head *h,
+ struct list_node *p,
+ struct list_node *n,
+ const char *abortstr)
+{
+ n->next = p;
+ n->prev = p->prev;
+ p->prev->next = n;
+ p->prev = n;
+ (void)list_debug(h, abortstr);
+}
+
+/**
+ * list_add_tail - add an entry at the end of a linked list.
+ * @h: the list_head to add the node to
+ * @n: the list_node to add to the list.
+ *
+ * The list_node does not need to be initialized; it will be overwritten.
+ * Example:
+ * list_add_tail(&parent->children, &child->list);
+ * parent->num_children++;
+ */
+#define list_add_tail(h, n) list_add_tail_(h, n, LIST_LOC)
+static inline void list_add_tail_(struct list_head *h,
+ struct list_node *n,
+ const char *abortstr)
+{
+ list_add_before_(h, &h->n, n, abortstr);
+}
+
+/**
+ * list_empty - is a list empty?
+ * @h: the list_head
+ *
+ * If the list is empty, returns true.
+ *
+ * Example:
+ * assert(list_empty(&parent->children) == (parent->num_children == 0));
+ */
+#define list_empty(h) list_empty_(h, LIST_LOC)
+static inline bool list_empty_(const struct list_head *h, const char* abortstr)
+{
+ (void)list_debug(h, abortstr);
+ return h->n.next == &h->n;
+}
+
+/**
+ * list_empty_nodebug - is a list empty (and don't perform debug checks)?
+ * @h: the list_head
+ *
+ * If the list is empty, returns true.
+ * This differs from list_empty() in that if CCAN_LIST_DEBUG is set it
+ * will NOT perform debug checks. Only use this function if you REALLY
+ * know what you're doing.
+ *
+ * Example:
+ * assert(list_empty_nodebug(&parent->children) == (parent->num_children == 0));
+ */
+#ifndef CCAN_LIST_DEBUG
+#define list_empty_nodebug(h) list_empty(h)
+#else
+static inline bool list_empty_nodebug(const struct list_head *h)
+{
+ return h->n.next == &h->n;
+}
+#endif
+
+/**
+ * list_empty_nocheck - is a list empty?
+ * @h: the list_head
+ *
+ * If the list is empty, returns true. This doesn't perform any
+ * debug check for list consistency, so it can be called without
+ * locks, racing with the list being modified. This is ok for
+ * checks where an incorrect result is not an issue (optimized
+ * bail out path for example).
+ */
+static inline bool list_empty_nocheck(const struct list_head *h)
+{
+ return h->n.next == &h->n;
+}
+
+/**
+ * list_del - delete an entry from an (unknown) linked list.
+ * @n: the list_node to delete from the list.
+ *
+ * Note that this leaves @n in an undefined state; it can be added to
+ * another list, but not deleted again.
+ *
+ * See also:
+ * list_del_from(), list_del_init()
+ *
+ * Example:
+ * list_del(&child->list);
+ * parent->num_children--;
+ */
+#define list_del(n) list_del_(n, LIST_LOC)
+static inline void list_del_(struct list_node *n, const char* abortstr)
+{
+ (void)list_debug_node(n, abortstr);
+ n->next->prev = n->prev;
+ n->prev->next = n->next;
+#ifdef CCAN_LIST_DEBUG
+ /* Catch use-after-del. */
+ n->next = n->prev = NULL;
+#endif
+}
+
+/**
+ * list_del_init - delete a node, and reset it so it can be deleted again.
+ * @n: the list_node to be deleted.
+ *
+ * list_del(@n) or list_del_init() again after this will be safe,
+ * which can be useful in some cases.
+ *
+ * See also:
+ * list_del_from(), list_del()
+ *
+ * Example:
+ * list_del_init(&child->list);
+ * parent->num_children--;
+ */
+#define list_del_init(n) list_del_init_(n, LIST_LOC)
+static inline void list_del_init_(struct list_node *n, const char *abortstr)
+{
+ list_del_(n, abortstr);
+ list_node_init(n);
+}
+
+/**
+ * list_del_from - delete an entry from a known linked list.
+ * @h: the list_head the node is in.
+ * @n: the list_node to delete from the list.
+ *
+ * This explicitly indicates which list a node is expected to be in,
+ * which is better documentation and can catch more bugs.
+ *
+ * See also: list_del()
+ *
+ * Example:
+ * list_del_from(&parent->children, &child->list);
+ * parent->num_children--;
+ */
+static inline void list_del_from(struct list_head *h, struct list_node *n)
+{
+#ifdef CCAN_LIST_DEBUG
+ {
+ /* Thorough check: make sure it was in list! */
+ struct list_node *i;
+ for (i = h->n.next; i != n; i = i->next)
+ assert(i != &h->n);
+ }
+#endif /* CCAN_LIST_DEBUG */
+
+ /* Quick test that catches a surprising number of bugs. */
+ assert(!list_empty(h));
+ list_del(n);
+}
+
+/**
+ * list_swap - swap out an entry from an (unknown) linked list for a new one.
+ * @o: the list_node to replace from the list.
+ * @n: the list_node to insert in place of the old one.
+ *
+ * Note that this leaves @o in an undefined state; it can be added to
+ * another list, but not deleted/swapped again.
+ *
+ * See also:
+ * list_del()
+ *
+ * Example:
+ * struct child x1, x2;
+ * LIST_HEAD(xh);
+ *
+ * list_add(&xh, &x1.list);
+ * list_swap(&x1.list, &x2.list);
+ */
+#define list_swap(o, n) list_swap_(o, n, LIST_LOC)
+static inline void list_swap_(struct list_node *o,
+ struct list_node *n,
+ const char* abortstr)
+{
+ (void)list_debug_node(o, abortstr);
+ *n = *o;
+ n->next->prev = n;
+ n->prev->next = n;
+#ifdef CCAN_LIST_DEBUG
+ /* Catch use-after-del. */
+ o->next = o->prev = NULL;
+#endif
+}
+
+/**
+ * list_entry - convert a list_node back into the structure containing it.
+ * @n: the list_node
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * Example:
+ * // First list entry is children.next; convert back to child.
+ * child = list_entry(parent->children.n.next, struct child, list);
+ *
+ * See Also:
+ * list_top(), list_for_each()
+ */
+#define list_entry(n, type, member) container_of(n, type, member)
+
+/**
+ * list_top - get the first entry in a list
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example:
+ * struct child *first;
+ * first = list_top(&parent->children, struct child, list);
+ * if (!first)
+ * printf("Empty list!\n");
+ */
+#define list_top(h, type, member) \
+ ((type *)list_top_((h), list_off_(type, member)))
+
+static inline const void *list_top_(const struct list_head *h, size_t off)
+{
+ if (list_empty(h))
+ return NULL;
+ return (const char *)h->n.next - off;
+}
+
+/**
+ * list_pop - remove the first entry in a list
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example:
+ * struct child *one;
+ * one = list_pop(&parent->children, struct child, list);
+ * if (!one)
+ * printf("Empty list!\n");
+ */
+#define list_pop(h, type, member) \
+ ((type *)list_pop_((h), list_off_(type, member)))
+
+static inline const void *list_pop_(const struct list_head *h, size_t off)
+{
+ struct list_node *n;
+
+ if (list_empty(h))
+ return NULL;
+ n = h->n.next;
+ list_del(n);
+ return (const char *)n - off;
+}
+
+/**
+ * list_tail - get the last entry in a list
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example:
+ * struct child *last;
+ * last = list_tail(&parent->children, struct child, list);
+ * if (!last)
+ * printf("Empty list!\n");
+ */
+#define list_tail(h, type, member) \
+ ((type *)list_tail_((h), list_off_(type, member)))
+
+static inline const void *list_tail_(const struct list_head *h, size_t off)
+{
+ if (list_empty(h))
+ return NULL;
+ return (const char *)h->n.prev - off;
+}
+
+/**
+ * list_for_each - iterate through a list.
+ * @h: the list_head (warning: evaluated multiple times!)
+ * @i: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal.
+ *
+ * Example:
+ * list_for_each(&parent->children, child, list)
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each(h, i, member) \
+ list_for_each_off(h, i, list_off_var_(i, member))
+
+/**
+ * list_for_each_rev - iterate through a list backwards.
+ * @h: the list_head
+ * @i: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal.
+ *
+ * Example:
+ * list_for_each_rev(&parent->children, child, list)
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_rev(h, i, member) \
+ list_for_each_rev_off(h, i, list_off_var_(i, member))
+
+/**
+ * list_for_each_rev_safe - iterate through a list backwards,
+ * maybe during deletion
+ * @h: the list_head
+ * @i: the structure containing the list_node
+ * @nxt: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list backwards.
+ * It's a for loop, so you can break and continue as normal. The extra
+ * variable * @nxt is used to hold the next element, so you can delete @i
+ * from the list.
+ *
+ * Example:
+ * struct child *next;
+ * list_for_each_rev_safe(&parent->children, child, next, list) {
+ * printf("Name: %s\n", child->name);
+ * }
+ */
+#define list_for_each_rev_safe(h, i, nxt, member) \
+ list_for_each_rev_safe_off(h, i, nxt, list_off_var_(i, member))
+
+/**
+ * list_for_each_safe - iterate through a list, maybe during deletion
+ * @h: the list_head
+ * @i: the structure containing the list_node
+ * @nxt: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal. The extra variable
+ * @nxt is used to hold the next element, so you can delete @i from the list.
+ *
+ * Example:
+ * list_for_each_safe(&parent->children, child, next, list) {
+ * list_del(&child->list);
+ * parent->num_children--;
+ * }
+ */
+#define list_for_each_safe(h, i, nxt, member) \
+ list_for_each_safe_off(h, i, nxt, list_off_var_(i, member))
+
+/**
+ * list_next - get the next entry in a list
+ * @h: the list_head
+ * @i: a pointer to an entry in the list.
+ * @member: the list_node member of the structure
+ *
+ * If @i was the last entry in the list, returns NULL.
+ *
+ * Example:
+ * struct child *second;
+ * second = list_next(&parent->children, first, list);
+ * if (!second)
+ * printf("No second child!\n");
+ */
+#define list_next(h, i, member) \
+ ((list_typeof(i))list_entry_or_null(list_debug(h, \
+ __FILE__ ":" stringify(__LINE__)), \
+ (i)->member.next, \
+ list_off_var_((i), member)))
+
+/**
+ * list_prev - get the previous entry in a list
+ * @h: the list_head
+ * @i: a pointer to an entry in the list.
+ * @member: the list_node member of the structure
+ *
+ * If @i was the first entry in the list, returns NULL.
+ *
+ * Example:
+ * first = list_prev(&parent->children, second, list);
+ * if (!first)
+ * printf("Can't go back to first child?!\n");
+ */
+#define list_prev(h, i, member) \
+ ((list_typeof(i))list_entry_or_null(list_debug(h, \
+ __FILE__ ":" stringify(__LINE__)), \
+ (i)->member.prev, \
+ list_off_var_((i), member)))
+
+/**
+ * list_append_list - empty one list onto the end of another.
+ * @to: the list to append into
+ * @from: the list to empty.
+ *
+ * This takes the entire contents of @from and moves it to the end of
+ * @to. After this @from will be empty.
+ *
+ * Example:
+ * struct list_head adopter;
+ *
+ * list_append_list(&adopter, &parent->children);
+ * assert(list_empty(&parent->children));
+ * parent->num_children = 0;
+ */
+#define list_append_list(t, f) list_append_list_(t, f, \
+ __FILE__ ":" stringify(__LINE__))
+static inline void list_append_list_(struct list_head *to,
+ struct list_head *from,
+ const char *abortstr)
+{
+ struct list_node *from_tail = list_debug(from, abortstr)->n.prev;
+ struct list_node *to_tail = list_debug(to, abortstr)->n.prev;
+
+ /* Sew in head and entire list. */
+ to->n.prev = from_tail;
+ from_tail->next = &to->n;
+ to_tail->next = &from->n;
+ from->n.prev = to_tail;
+
+ /* Now remove head. */
+ list_del(&from->n);
+ list_head_init(from);
+}
+
+/**
+ * list_prepend_list - empty one list into the start of another.
+ * @to: the list to prepend into
+ * @from: the list to empty.
+ *
+ * This takes the entire contents of @from and moves it to the start
+ * of @to. After this @from will be empty.
+ *
+ * Example:
+ * list_prepend_list(&adopter, &parent->children);
+ * assert(list_empty(&parent->children));
+ * parent->num_children = 0;
+ */
+#define list_prepend_list(t, f) list_prepend_list_(t, f, LIST_LOC)
+static inline void list_prepend_list_(struct list_head *to,
+ struct list_head *from,
+ const char *abortstr)
+{
+ struct list_node *from_tail = list_debug(from, abortstr)->n.prev;
+ struct list_node *to_head = list_debug(to, abortstr)->n.next;
+
+ /* Sew in head and entire list. */
+ to->n.next = &from->n;
+ from->n.prev = &to->n;
+ to_head->prev = from_tail;
+ from_tail->next = to_head;
+
+ /* Now remove head. */
+ list_del(&from->n);
+ list_head_init(from);
+}
+
+/* internal macros, do not use directly */
+#define list_for_each_off_dir_(h, i, off, dir) \
+ for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \
+ (off)); \
+ list_node_from_off_((void *)i, (off)) != &(h)->n; \
+ i = list_node_to_off_(list_node_from_off_((void *)i, (off))->dir, \
+ (off)))
+
+#define list_for_each_safe_off_dir_(h, i, nxt, off, dir) \
+ for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \
+ (off)), \
+ nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \
+ (off)); \
+ list_node_from_off_(i, (off)) != &(h)->n; \
+ i = nxt, \
+ nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \
+ (off)))
+
+/**
+ * list_for_each_off - iterate through a list of memory regions.
+ * @h: the list_head
+ * @i: the pointer to a memory region wich contains list node data.
+ * @off: offset(relative to @i) at which list node data resides.
+ *
+ * This is a low-level wrapper to iterate @i over the entire list, used to
+ * implement all oher, more high-level, for-each constructs. It's a for loop,
+ * so you can break and continue as normal.
+ *
+ * WARNING! Being the low-level macro that it is, this wrapper doesn't know
+ * nor care about the type of @i. The only assumtion made is that @i points
+ * to a chunk of memory that at some @offset, relative to @i, contains a
+ * properly filled `struct node_list' which in turn contains pointers to
+ * memory chunks and it's turtles all the way down. Whith all that in mind
+ * remember that given the wrong pointer/offset couple this macro will
+ * happilly churn all you memory untill SEGFAULT stops it, in other words
+ * caveat emptor.
+ *
+ * It is worth mentioning that one of legitimate use-cases for that wrapper
+ * is operation on opaque types with known offset for `struct list_node'
+ * member(preferably 0), because it allows you not to disclose the type of
+ * @i.
+ *
+ * Example:
+ * list_for_each_off(&parent->children, child,
+ * offsetof(struct child, list))
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_off(h, i, off) \
+ list_for_each_off_dir_((h),(i),(off),next)
+
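+/*
+ * Illustrative usage sketch (not part of the upstream header): iterating
+ * an opaque type whose `struct list_node' is known to live at offset 0,
+ * so the element type never has to be disclosed ("registry" and
+ * handle_elem() are hypothetical names):
+ *
+ * void *elem;
+ *
+ * list_for_each_off(&registry, elem, 0)
+ * handle_elem(elem);
+ */
+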
+/**
+ * list_for_each_rev_off - iterate through a list of memory regions backwards
+ * @h: the list_head
+ * @i: the pointer to a memory region which contains list node data.
+ * @off: offset (relative to @i) at which list node data resides.
+ *
+ * See list_for_each_off for details
+ */
+#define list_for_each_rev_off(h, i, off) \
+ list_for_each_off_dir_((h),(i),(off),prev)
+
+/**
+ * list_for_each_safe_off - iterate through a list of memory regions, maybe
+ * during deletion
+ * @h: the list_head
+ * @i: the pointer to a memory region which contains list node data.
+ * @nxt: scratch pointer used to hold the next region (same kind as @i).
+ * @off: offset (relative to @i) at which list node data resides.
+ *
+ * For details see `list_for_each_off' and `list_for_each_safe'
+ * descriptions.
+ *
+ * Example:
+ * list_for_each_safe_off(&parent->children, child,
+ * next, offsetof(struct child, list))
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_safe_off(h, i, nxt, off) \
+ list_for_each_safe_off_dir_((h),(i),(nxt),(off),next)
+
+/**
+ * list_for_each_rev_safe_off - iterate backwards through a list of
+ * memory regions, maybe during deletion
+ * @h: the list_head
+ * @i: the pointer to a memory region which contains list node data.
+ * @nxt: scratch pointer used to hold the next region (same kind as @i).
+ * @off: offset (relative to @i) at which list node data resides.
+ *
+ * For details see `list_for_each_rev_off' and `list_for_each_rev_safe'
+ * descriptions.
+ *
+ * Example:
+ * list_for_each_rev_safe_off(&parent->children, child,
+ * next, offsetof(struct child, list))
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_rev_safe_off(h, i, nxt, off) \
+ list_for_each_safe_off_dir_((h),(i),(nxt),(off),prev)
+
+/* Other -off variants. */
+#define list_entry_off(n, type, off) \
+ ((type *)list_node_from_off_((n), (off)))
+
+#define list_head_off(h, type, off) \
+ ((type *)list_top_((h), (off)))
+
+#define list_tail_off(h, type, off) \
+ ((type *)list_tail_((h), (off)))
+
+#define list_add_off(h, n, off) \
+ list_add((h), list_node_from_off_((n), (off)))
+
+#define list_del_off(n, off) \
+ list_del(list_node_from_off_((n), (off)))
+
+#define list_del_from_off(h, n, off) \
+ list_del_from(h, list_node_from_off_((n), (off)))
+
+/* Offset helper functions so we only single-evaluate. */
+static inline void *list_node_to_off_(struct list_node *node, size_t off)
+{
+ return (void *)((char *)node - off);
+}
+static inline struct list_node *list_node_from_off_(void *ptr, size_t off)
+{
+ return (struct list_node *)((char *)ptr + off);
+}
+
+/* Get the offset of the member, but make sure it's a list_node. */
+#define list_off_(type, member) \
+ (container_off(type, member) + \
+ check_type(((type *)0)->member, struct list_node))
+
+#define list_off_var_(var, member) \
+ (container_off_var(var, member) + \
+ check_type(var->member, struct list_node))
+
+#if HAVE_TYPEOF
+#define list_typeof(var) typeof(var)
+#else
+#define list_typeof(var) void *
+#endif
+
+/* Returns member, or NULL if at end of list. */
+static inline void *list_entry_or_null(const struct list_head *h,
+ const struct list_node *n,
+ size_t off)
+{
+ if (n == &h->n)
+ return NULL;
+ return (char *)n - off;
+}
+#endif /* CCAN_LIST_H */
diff --git a/src/rc-compat/v37/ccan/minmax.h b/src/rc-compat/v37/ccan/minmax.h
new file mode 100644
index 000000000000..ab6c55472b9a
--- /dev/null
+++ b/src/rc-compat/v37/ccan/minmax.h
@@ -0,0 +1,65 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_MINMAX_H
+#define CCAN_MINMAX_H
+
+#include "config.h"
+
+#include <ccan/build_assert.h>
+
+#if !HAVE_STATEMENT_EXPR || !HAVE_TYPEOF
+/*
+ * Without these, there's no way to avoid unsafe double evaluation of
+ * the arguments
+ */
+#error Sorry, minmax module requires statement expressions and typeof
+#endif
+
+#if HAVE_BUILTIN_TYPES_COMPATIBLE_P
+#define MINMAX_ASSERT_COMPATIBLE(a, b) \
+ BUILD_ASSERT(__builtin_types_compatible_p(a, b))
+#else
+#define MINMAX_ASSERT_COMPATIBLE(a, b) \
+ do { } while (0)
+#endif
+
+#define min(a, b) \
+ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ MINMAX_ASSERT_COMPATIBLE(typeof(_a), typeof(_b)); \
+ _a < _b ? _a : _b; \
+ })
+
+#define max(a, b) \
+ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ MINMAX_ASSERT_COMPATIBLE(typeof(_a), typeof(_b)); \
+ _a > _b ? _a : _b; \
+ })
+
+#define clamp(v, f, c) (max(min((v), (c)), (f)))
+
+
+#define min_t(t, a, b) \
+ ({ \
+ t _ta = (a); \
+ t _tb = (b); \
+ min(_ta, _tb); \
+ })
+#define max_t(t, a, b) \
+ ({ \
+ t _ta = (a); \
+ t _tb = (b); \
+ max(_ta, _tb); \
+ })
+
+#define clamp_t(t, v, f, c) \
+ ({ \
+ t _tv = (v); \
+ t _tf = (f); \
+ t _tc = (c); \
+ clamp(_tv, _tf, _tc); \
+ })
+
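+/*
+ * Usage sketch (illustrative, not part of the upstream header): min()/max()
+ * reject mixed argument types at compile time, while the _t variants cast
+ * both arguments to the named type first:
+ *
+ * int a = min(2, 3); (fine: both int)
+ * long b = min_t(long, 2, 3L); (fine: both cast to long)
+ * int c = clamp(v, 0, 255); (v clamped into [0, 255])
+ *
+ * min(2, 3L), by contrast, fails to build when
+ * HAVE_BUILTIN_TYPES_COMPATIBLE_P is set, because int and long are not
+ * compatible types.
+ */
+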
+#endif /* CCAN_MINMAX_H */
diff --git a/src/rc-compat/v37/ccan/str.h b/src/rc-compat/v37/ccan/str.h
new file mode 100644
index 000000000000..68c8a518b700
--- /dev/null
+++ b/src/rc-compat/v37/ccan/str.h
@@ -0,0 +1,228 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_STR_H
+#define CCAN_STR_H
+#include "config.h"
+#include <string.h>
+#include <stdbool.h>
+#include <limits.h>
+#include <ctype.h>
+
+/**
+ * streq - Are two strings equal?
+ * @a: first string
+ * @b: second string
+ *
+ * This macro is arguably more readable than "!strcmp(a, b)".
+ *
+ * Example:
+ * if (streq(somestring, ""))
+ * printf("String is empty!\n");
+ */
+#define streq(a,b) (strcmp((a),(b)) == 0)
+
+/**
+ * strstarts - Does this string start with this prefix?
+ * @str: string to test
+ * @prefix: prefix to look for at start of str
+ *
+ * Example:
+ * if (strstarts(somestring, "foo"))
+ * printf("String %s begins with 'foo'!\n", somestring);
+ */
+#define strstarts(str,prefix) (strncmp((str),(prefix),strlen(prefix)) == 0)
+
+/**
+ * strends - Does this string end with this postfix?
+ * @str: string to test
+ * @postfix: postfix to look for at end of str
+ *
+ * Example:
+ * if (strends(somestring, "foo"))
+ * printf("String %s end with 'foo'!\n", somestring);
+ */
+static inline bool strends(const char *str, const char *postfix)
+{
+ if (strlen(str) < strlen(postfix))
+ return false;
+
+ return streq(str + strlen(str) - strlen(postfix), postfix);
+}
+
+/**
+ * stringify - Turn expression into a string literal
+ * @expr: any C expression
+ *
+ * Example:
+ * #define PRINT_COND_IF_FALSE(cond) \
+ * ((cond) || printf("%s is false!", stringify(cond)))
+ */
+#define stringify(expr) stringify_1(expr)
+/* Double-indirection required to stringify expansions */
+#define stringify_1(expr) #expr
+
+/**
+ * strcount - Count number of (non-overlapping) occurrences of a substring.
+ * @haystack: a C string
+ * @needle: a substring
+ *
+ * Example:
+ * assert(strcount("aaa aaa", "a") == 6);
+ * assert(strcount("aaa aaa", "ab") == 0);
+ * assert(strcount("aaa aaa", "aa") == 2);
+ */
+size_t strcount(const char *haystack, const char *needle);
+
+/**
+ * STR_MAX_CHARS - Maximum possible size of numeric string for this type.
+ * @type_or_expr: a pointer or integer type or expression.
+ *
+ * This provides enough space for a nul-terminated string which represents the
+ * largest possible value for the type or expression.
+ *
+ * Note: The implementation adds extra space so hex values or negative
+ * values will fit (e.g. sprintf(..., "%p")).
+ *
+ * Example:
+ * char str[STR_MAX_CHARS(int)];
+ *
+ * sprintf(str, "%i", 7);
+ */
+#define STR_MAX_CHARS(type_or_expr) \
+ ((sizeof(type_or_expr) * CHAR_BIT + 8) / 9 * 3 + 2 \
+ + STR_MAX_CHARS_TCHECK_(type_or_expr))
+
+#if HAVE_TYPEOF
+/* Only a simple type can have 0 assigned, so test that. */
+#define STR_MAX_CHARS_TCHECK_(type_or_expr) \
+ ({ typeof(type_or_expr) x = 0; (void)x; 0; })
+#else
+#define STR_MAX_CHARS_TCHECK_(type_or_expr) 0
+#endif
+
+/**
+ * cisalnum - isalnum() which takes a char (and doesn't accept EOF)
+ * @c: a character
+ *
+ * Surprisingly, the standard ctype.h isalnum() takes an int, which
+ * must have the value of EOF (-1) or an unsigned char. This variant
+ * takes a real char, and doesn't accept EOF.
+ */
+static inline bool cisalnum(char c)
+{
+ return isalnum((unsigned char)c);
+}
+static inline bool cisalpha(char c)
+{
+ return isalpha((unsigned char)c);
+}
+static inline bool cisascii(char c)
+{
+ return isascii((unsigned char)c);
+}
+#if HAVE_ISBLANK
+static inline bool cisblank(char c)
+{
+ return isblank((unsigned char)c);
+}
+#endif
+static inline bool ciscntrl(char c)
+{
+ return iscntrl((unsigned char)c);
+}
+static inline bool cisdigit(char c)
+{
+ return isdigit((unsigned char)c);
+}
+static inline bool cisgraph(char c)
+{
+ return isgraph((unsigned char)c);
+}
+static inline bool cislower(char c)
+{
+ return islower((unsigned char)c);
+}
+static inline bool cisprint(char c)
+{
+ return isprint((unsigned char)c);
+}
+static inline bool cispunct(char c)
+{
+ return ispunct((unsigned char)c);
+}
+static inline bool cisspace(char c)
+{
+ return isspace((unsigned char)c);
+}
+static inline bool cisupper(char c)
+{
+ return isupper((unsigned char)c);
+}
+static inline bool cisxdigit(char c)
+{
+ return isxdigit((unsigned char)c);
+}
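+
+/*
+ * Usage sketch (illustrative, not part of the upstream header): with a
+ * plain, possibly signed char, passing a negative byte to the standard
+ * isalnum() is undefined behaviour; the c* wrappers above cast to
+ * unsigned char first, so the call below is always well-defined:
+ *
+ * char byte = '\xe9';
+ * if (cisalnum(byte))
+ * count++;
+ */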
+
+#include <ccan/str_debug.h>
+
+/* These checks force things out of line, hence they are under DEBUG. */
+#ifdef CCAN_STR_DEBUG
+#include <ccan/build_assert.h>
+
+/* These are commonly misused: they take -1 or an *unsigned* char value. */
+#undef isalnum
+#undef isalpha
+#undef isascii
+#undef isblank
+#undef iscntrl
+#undef isdigit
+#undef isgraph
+#undef islower
+#undef isprint
+#undef ispunct
+#undef isspace
+#undef isupper
+#undef isxdigit
+
+/* You can use a char if char is unsigned. */
+#if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF
+#define str_check_arg_(i) \
+ ((i) + BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(i), \
+ char) \
+ || (char)255 > 0))
+#else
+#define str_check_arg_(i) (i)
+#endif
+
+#define isalnum(i) str_isalnum(str_check_arg_(i))
+#define isalpha(i) str_isalpha(str_check_arg_(i))
+#define isascii(i) str_isascii(str_check_arg_(i))
+#if HAVE_ISBLANK
+#define isblank(i) str_isblank(str_check_arg_(i))
+#endif
+#define iscntrl(i) str_iscntrl(str_check_arg_(i))
+#define isdigit(i) str_isdigit(str_check_arg_(i))
+#define isgraph(i) str_isgraph(str_check_arg_(i))
+#define islower(i) str_islower(str_check_arg_(i))
+#define isprint(i) str_isprint(str_check_arg_(i))
+#define ispunct(i) str_ispunct(str_check_arg_(i))
+#define isspace(i) str_isspace(str_check_arg_(i))
+#define isupper(i) str_isupper(str_check_arg_(i))
+#define isxdigit(i) str_isxdigit(str_check_arg_(i))
+
+#if HAVE_TYPEOF
+/* With GNU magic, we can make const-respecting standard string functions. */
+#undef strstr
+#undef strchr
+#undef strrchr
+
+/* + 0 is needed to decay array into pointer. */
+#define strstr(haystack, needle) \
+ ((typeof((haystack) + 0))str_strstr((haystack), (needle)))
+#define strchr(haystack, c) \
+ ((typeof((haystack) + 0))str_strchr((haystack), (c)))
+#define strrchr(haystack, c) \
+ ((typeof((haystack) + 0))str_strrchr((haystack), (c)))
+#endif
+#endif /* CCAN_STR_DEBUG */
+
+#endif /* CCAN_STR_H */
diff --git a/src/rc-compat/v37/ccan/str_debug.h b/src/rc-compat/v37/ccan/str_debug.h
new file mode 100644
index 000000000000..7a3343816f7f
--- /dev/null
+++ b/src/rc-compat/v37/ccan/str_debug.h
@@ -0,0 +1,30 @@
+/* CC0 (Public domain) - see LICENSE.CC0 file for details */
+#ifndef CCAN_STR_DEBUG_H
+#define CCAN_STR_DEBUG_H
+
+/* #define CCAN_STR_DEBUG 1 */
+
+#ifdef CCAN_STR_DEBUG
+/* Because we mug the real ones with macros, we need our own wrappers. */
+int str_isalnum(int i);
+int str_isalpha(int i);
+int str_isascii(int i);
+#if HAVE_ISBLANK
+int str_isblank(int i);
+#endif
+int str_iscntrl(int i);
+int str_isdigit(int i);
+int str_isgraph(int i);
+int str_islower(int i);
+int str_isprint(int i);
+int str_ispunct(int i);
+int str_isspace(int i);
+int str_isupper(int i);
+int str_isxdigit(int i);
+
+char *str_strstr(const char *haystack, const char *needle);
+char *str_strchr(const char *s, int c);
+char *str_strrchr(const char *s, int c);
+#endif /* CCAN_STR_DEBUG */
+
+#endif /* CCAN_STR_DEBUG_H */
diff --git a/src/rc-compat/v37/cmd_ioctl.h b/src/rc-compat/v37/cmd_ioctl.h
new file mode 100644
index 000000000000..d5889a16ecc3
--- /dev/null
+++ b/src/rc-compat/v37/cmd_ioctl.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __INFINIBAND_VERBS_IOCTL_H
+#define __INFINIBAND_VERBS_IOCTL_H
+
+#include <config.h>
+
+#include <stdint.h>
+#include <assert.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
+#include <infiniband/verbs.h>
+#include <ccan/container_of.h>
+#include <util/compiler.h>
+
+static inline uint64_t ioctl_ptr_to_u64(const void *ptr)
+{
+ if (sizeof(ptr) == sizeof(uint64_t))
+ return (uintptr_t)ptr;
+
+ /*
+ * Some CPU architectures require sign extension when converting from
+ * a 32 bit to 64 bit pointer. This should match the kernel
+ * implementation of compat_ptr() for the architecture.
+ */
+#if defined(__tilegx__)
+ return (int64_t)(intptr_t)ptr;
+#else
+ return (uintptr_t)ptr;
+#endif
+}
+
+static inline void _scrub_ptr_attr(void **ptr)
+{
+#if UINTPTR_MAX == UINT64_MAX
+ /* Do nothing */
+#else
+ RDMA_UAPI_PTR(void *, data) *scrub_data;
+
+ scrub_data = container_of(ptr, typeof(*scrub_data), data);
+ scrub_data->data_data_u64 = ioctl_ptr_to_u64(scrub_data->data);
+#endif
+}
+
+#define scrub_ptr_attr(ptr) _scrub_ptr_attr((void **)(&ptr))
+
+/*
+ * The command buffer is organized as a linked list of blocks of attributes.
+ * Each stack frame allocates its block and then calls up toward the core code
+ * which will do the ioctl. The frame that does the ioctl calls the special
+ * FINAL variant which will allocate enough space to linearize the attribute
+ * buffer for the kernel.
+ *
+ * The current range of attributes to fill is next_attr -> last_attr.
+ */
+struct ibv_command_buffer {
+ struct ibv_command_buffer *next;
+ struct ib_uverbs_attr *next_attr;
+ struct ib_uverbs_attr *last_attr;
+ /*
+ * Used by the legacy write interface to keep track of where the UHW
+ * buffer is located and the 'headroom' space that the common code
+ * uses to construct the command header and common command struct
+ * directly before the drivers' UHW.
+ */
+ uint8_t uhw_in_idx;
+ uint8_t uhw_out_idx;
+ uint8_t uhw_in_headroom_dwords;
+ uint8_t uhw_out_headroom_dwords;
+
+ uint8_t buffer_error:1;
+ /*
+ * These flags control what execute_ioctl_fallback does if the kernel
+ * does not support ioctl
+ */
+ uint8_t fallback_require_ex:1;
+ uint8_t fallback_ioctl_only:1;
+ struct ib_uverbs_ioctl_hdr hdr;
+};
+
+enum {_UHW_NO_INDEX = 0xFF};
+
+/*
+ * Constructing an array of ibv_command_buffer is a reasonable way to expand
+ * the VLA in hdr.attrs on the stack and also allocate some internal state in
+ * a single contiguous stack memory region. It will over-allocate the region in
+ * some cases, but this approach allows the number of elements to be dynamic,
+ * and not fixed as a compile time constant.
+ */
+#define _IOCTL_NUM_CMDB(_num_attrs) \
+ ((sizeof(struct ibv_command_buffer) + \
+ sizeof(struct ib_uverbs_attr) * (_num_attrs) + \
+ sizeof(struct ibv_command_buffer) - 1) / \
+ sizeof(struct ibv_command_buffer))
+
+unsigned int __ioctl_final_num_attrs(unsigned int num_attrs,
+ struct ibv_command_buffer *link);
+
+/* If the user doesn't provide a link then don't create a VLA */
+#define _ioctl_final_num_attrs(_num_attrs, _link) \
+ ((__builtin_constant_p(!(_link)) && !(_link)) \
+ ? (_num_attrs) \
+ : __ioctl_final_num_attrs(_num_attrs, _link))
+
+#define _COMMAND_BUFFER_INIT(_hdr, _object_id, _method_id, _num_attrs, _link) \
+ ((struct ibv_command_buffer){ \
+ .hdr = \
+ { \
+ .object_id = (_object_id), \
+ .method_id = (_method_id), \
+ }, \
+ .next = _link, \
+ .uhw_in_idx = _UHW_NO_INDEX, \
+ .uhw_out_idx = _UHW_NO_INDEX, \
+ .next_attr = (_hdr).attrs, \
+ .last_attr = (_hdr).attrs + _num_attrs})
+
+/*
+ * C99 does not permit an initializer for VLAs, so this function does the init
+ * instead. It is called in this wonky way so that DECLARE_COMMAND_BUFFER can
+ * still be a 'variable', and so that we don't require C11 mode.
+ */
+static inline int _ioctl_init_cmdb(struct ibv_command_buffer *cmd,
+ uint16_t object_id, uint16_t method_id,
+ size_t num_attrs,
+ struct ibv_command_buffer *link)
+{
+ *cmd = _COMMAND_BUFFER_INIT(cmd->hdr, object_id, method_id, num_attrs,
+ link);
+ return 0;
+}
+
+/*
+ * Construct an IOCTL command buffer on the stack with enough space for
+ * _num_attrs elements. _num_attrs does not have to be a compile time constant.
+ * _link is a previous COMMAND_BUFFER in the call chain.
+ */
+#ifndef __CHECKER__
+#define DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \
+ _link) \
+ const unsigned int __##_name##total = \
+ _ioctl_final_num_attrs(_num_attrs, _link); \
+ struct ibv_command_buffer _name[_IOCTL_NUM_CMDB(__##_name##total)]; \
+ int __attribute__((unused)) __##_name##dummy = _ioctl_init_cmdb( \
+ _name, _object_id, _method_id, __##_name##total, _link)
+#else
+/*
+ * sparse enforces kernel rules which forbid VLAs. Make the VLA into a static
+ * array when running sparse. Don't actually run the sparse compile result.
+ */
+#define DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \
+ _link) \
+ struct ibv_command_buffer _name[10]; \
+ int __attribute__((unused)) __##_name##dummy = \
+ _ioctl_init_cmdb(_name, _object_id, _method_id, 10, _link)
+#endif
+
+#define DECLARE_COMMAND_BUFFER(_name, _object_id, _method_id, _num_attrs) \
+ DECLARE_COMMAND_BUFFER_LINK(_name, _object_id, _method_id, _num_attrs, \
+ NULL)
+
+int execute_ioctl(struct ibv_context *context, struct ibv_command_buffer *cmd);
+
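+/*
+ * Typical call pattern (illustrative sketch only; the FOO_* object,
+ * method and attribute IDs are placeholders, not real rdma-core
+ * definitions):
+ *
+ * DECLARE_COMMAND_BUFFER(cmdb, FOO_OBJECT, FOO_METHOD_QUERY, 2);
+ * uint64_t out_val;
+ *
+ * fill_attr_in_obj(cmdb, FOO_ATTR_IN_HANDLE, handle);
+ * fill_attr_out_ptr(cmdb, FOO_ATTR_OUT_VAL, &out_val);
+ * if (execute_ioctl(context, cmdb))
+ * return errno;
+ */
+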
+static inline struct ib_uverbs_attr *
+_ioctl_next_attr(struct ibv_command_buffer *cmd, uint16_t attr_id)
+{
+ struct ib_uverbs_attr *attr;
+
+ assert(cmd->next_attr < cmd->last_attr);
+ attr = cmd->next_attr++;
+
+ *attr = (struct ib_uverbs_attr){
+ .attr_id = attr_id,
+ /*
+ * All attributes default to mandatory. Wrap the fill_*
+ * call in attr_optional() to make it optional.
+ */
+ .flags = UVERBS_ATTR_F_MANDATORY,
+ };
+
+ return attr;
+}
+
+/*
+ * This construction is insane: an expression with a side effect that returns
+ * from the calling function. But it is a non-invasive way to get the compiler
+ * to elide the IOCTL support in the backwards compat command functions
+ * without disturbing native ioctl support.
+ *
+ * A command function will set last_attr on the stack to NULL, and if it is
+ * coded properly, the compiler will prove that last_attr is never changed and
+ * elide the function. Unfortunately this penalizes native ioctl uses with the
+ * extra if overhead.
+ *
+ * For this reason, _ioctl_next_attr must never be called outside a fill
+ * function.
+ */
+#if VERBS_WRITE_ONLY
+#define _ioctl_next_attr(cmd, attr_id) \
+ ({ \
+ if (!((cmd)->last_attr)) \
+ return NULL; \
+ _ioctl_next_attr(cmd, attr_id); \
+ })
+#endif
+
+/* Make the attribute optional. */
+static inline struct ib_uverbs_attr *attr_optional(struct ib_uverbs_attr *attr)
+{
+ if (!attr)
+ return attr;
+
+ attr->flags &= ~UVERBS_ATTR_F_MANDATORY;
+ return attr;
+}
+
+/* Send attributes of kernel type UVERBS_ATTR_TYPE_IDR */
+static inline struct ib_uverbs_attr *
+fill_attr_in_obj(struct ibv_command_buffer *cmd, uint16_t attr_id, uint32_t idr)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ /* UVERBS_ATTR_TYPE_IDR uses a 64 bit value for the idr # */
+ attr->data = idr;
+ return attr;
+}
+
+static inline struct ib_uverbs_attr *
+fill_attr_out_obj(struct ibv_command_buffer *cmd, uint16_t attr_id)
+{
+ return fill_attr_in_obj(cmd, attr_id, 0);
+}
+
+static inline uint32_t read_attr_obj(uint16_t attr_id,
+ struct ib_uverbs_attr *attr)
+{
+ assert(attr->attr_id == attr_id);
+ return attr->data;
+}
+
+/* Send attributes of kernel type UVERBS_ATTR_TYPE_PTR_IN */
+static inline struct ib_uverbs_attr *
+fill_attr_in(struct ibv_command_buffer *cmd, uint16_t attr_id, const void *data,
+ size_t len)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ if (unlikely(len > UINT16_MAX))
+ cmd->buffer_error = 1;
+
+ attr->len = len;
+ if (len <= sizeof(uint64_t))
+ memcpy(&attr->data, data, len);
+ else
+ attr->data = ioctl_ptr_to_u64(data);
+
+ return attr;
+}
+
+#define fill_attr_in_ptr(cmd, attr_id, ptr) \
+ fill_attr_in(cmd, attr_id, ptr, sizeof(*ptr))
+
+/* Send attributes of various inline kernel types */
+
+static inline struct ib_uverbs_attr *
+fill_attr_in_uint64(struct ibv_command_buffer *cmd, uint16_t attr_id,
+ uint64_t data)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ attr->len = sizeof(data);
+ attr->data = data;
+
+ return attr;
+}
+
+#define fill_attr_const_in(cmd, attr_id, _data) \
+ fill_attr_in_uint64(cmd, attr_id, _data)
+
+static inline struct ib_uverbs_attr *
+fill_attr_in_uint32(struct ibv_command_buffer *cmd, uint16_t attr_id,
+ uint32_t data)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ attr->len = sizeof(data);
+ memcpy(&attr->data, &data, sizeof(data));
+
+ return attr;
+}
+
+static inline struct ib_uverbs_attr *
+fill_attr_in_fd(struct ibv_command_buffer *cmd, uint16_t attr_id, int fd)
+{
+ struct ib_uverbs_attr *attr;
+
+ if (fd == -1)
+ return NULL;
+
+ attr = _ioctl_next_attr(cmd, attr_id);
+ /* UVERBS_ATTR_TYPE_FD uses a 64 bit value for the idr # */
+ attr->data = fd;
+ return attr;
+}
+
+static inline struct ib_uverbs_attr *
+fill_attr_out_fd(struct ibv_command_buffer *cmd, uint16_t attr_id, int fd)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ attr->data = 0;
+ return attr;
+}
+
+static inline int read_attr_fd(uint16_t attr_id, struct ib_uverbs_attr *attr)
+{
+ assert(attr->attr_id == attr_id);
+ /* The kernel cannot fail to create an FD here; it never returns -1 */
+ return attr->data;
+}
+
+/* Send attributes of kernel type UVERBS_ATTR_TYPE_PTR_OUT */
+static inline struct ib_uverbs_attr *
+fill_attr_out(struct ibv_command_buffer *cmd, uint16_t attr_id, void *data,
+ size_t len)
+{
+ struct ib_uverbs_attr *attr = _ioctl_next_attr(cmd, attr_id);
+
+ if (unlikely(len > UINT16_MAX))
+ cmd->buffer_error = 1;
+
+ attr->len = len;
+ attr->data = ioctl_ptr_to_u64(data);
+
+ return attr;
+}
+
+#define fill_attr_out_ptr(cmd, attr_id, ptr) \
+ fill_attr_out(cmd, attr_id, ptr, sizeof(*(ptr)))
+
+/* If size*nelems overflows size_t this returns SIZE_MAX */
+static inline size_t _array_len(size_t size, size_t nelems)
+{
+ if (size != 0 &&
+ SIZE_MAX / size <= nelems)
+ return SIZE_MAX;
+ return size * nelems;
+}
+
+#define fill_attr_out_ptr_array(cmd, attr_id, ptr, nelems) \
+ fill_attr_out(cmd, attr_id, ptr, _array_len(sizeof(*ptr), nelems))
+
+#define fill_attr_in_ptr_array(cmd, attr_id, ptr, nelems) \
+ fill_attr_in(cmd, attr_id, ptr, _array_len(sizeof(*ptr), nelems))
+
+static inline size_t __check_divide(size_t val, unsigned int div)
+{
+ assert(val % div == 0);
+ return val / div;
+}
+
+static inline struct ib_uverbs_attr *
+fill_attr_in_enum(struct ibv_command_buffer *cmd, uint16_t attr_id,
+ uint8_t elem_id, const void *data, size_t len)
+{
+ struct ib_uverbs_attr *attr;
+
+ attr = fill_attr_in(cmd, attr_id, data, len);
+ attr->attr_data.enum_data.elem_id = elem_id;
+
+ return attr;
+}
+
+/* Send attributes of kernel type UVERBS_ATTR_TYPE_IDRS_ARRAY */
+static inline struct ib_uverbs_attr *
+fill_attr_in_objs_arr(struct ibv_command_buffer *cmd, uint16_t attr_id,
+ const uint32_t *idrs_arr, size_t nelems)
+{
+ return fill_attr_in(cmd, attr_id, idrs_arr,
+ _array_len(sizeof(*idrs_arr), nelems));
+}
+
+#endif
diff --git a/src/rc-compat/v37/config.h b/src/rc-compat/v37/config.h
new file mode 100644
index 000000000000..4a63336a8a0d
--- /dev/null
+++ b/src/rc-compat/v37/config.h
@@ -0,0 +1,56 @@
+#ifndef CONFIG_H_IN
+#define CONFIG_H_IN
+
+#define HAVE_STATEMENT_EXPR 1
+#define HAVE_BUILTIN_TYPES_COMPATIBLE_P 1
+#define HAVE_TYPEOF 1
+#define HAVE_ISBLANK 1
+#define HAVE_BUILTIN_CLZ 1
+#define HAVE_BUILTIN_CLZL 1
+
+//#define PACKAGE_VERSION "37.3"
+
+// FIXME: Remove this; the cmake version hard-requires new-style CLOEXEC support
+#define STREAM_CLOEXEC "e"
+
+#define RDMA_CDEV_DIR "/dev/infiniband"
+
+#define VERBS_PROVIDER_SUFFIX "-rdmav34.so"
+#define IBVERBS_PABI_VERSION 34
+
+// FIXME: This has been supported in compilers forever; we should just fail to build on such old systems.
+#define HAVE_FUNC_ATTRIBUTE_ALWAYS_INLINE 1
+
+#define HAVE_FUNC_ATTRIBUTE_IFUNC 1
+
+/* #undef HAVE_FUNC_ATTRIBUTE_SYMVER */
+
+#define HAVE_WORKING_IF_H 1
+
+// Operating mode for symbol versions
+#define HAVE_FULL_SYMBOL_VERSIONS 1
+/* #undef HAVE_LIMITED_SYMBOL_VERSIONS */
+
+#define SIZEOF_LONG 8
+
+#if 3 == 1
+# define VERBS_IOCTL_ONLY 1
+# define VERBS_WRITE_ONLY 0
+#elif 3 == 2
+# define VERBS_IOCTL_ONLY 0
+# define VERBS_WRITE_ONLY 1
+#elif 3 == 3
+# define VERBS_IOCTL_ONLY 0
+# define VERBS_WRITE_ONLY 0
+#endif
+
+// Configuration defaults
+
+#define IBACM_SERVER_MODE_UNIX 0
+#define IBACM_SERVER_MODE_LOOP 1
+#define IBACM_SERVER_MODE_OPEN 2
+#define IBACM_SERVER_MODE_DEFAULT IBACM_SERVER_MODE_UNIX
+
+#define IBACM_ACME_PLUS_KERNEL_ONLY_DEFAULT 0
+
+#endif
diff --git a/src/rc-compat/v37/driver.h b/src/rc-compat/v37/driver.h
new file mode 100644
index 000000000000..46406cc433f6
--- /dev/null
+++ b/src/rc-compat/v37/driver.h
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INFINIBAND_DRIVER_H
+#define INFINIBAND_DRIVER_H
+
+#include <stdatomic.h>
+#include <infiniband/verbs.h>
+#include "kern-abi.h"
+#include "cmd_ioctl.h"
+#include <ccan/list.h>
+#include "config.h"
+#include <stdbool.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
+#include "cmd_ioctl.h"
+#include <sys/types.h>
+
+/* NOTE: Start of StarlingX addition */
+#define IBV_DEVICE_LIBRARY_EXTENSION rdmav34
+/* NOTE: End of StarlingX addition */
+
+struct verbs_device;
+
+enum {
+ VERBS_LOG_LEVEL_NONE,
+ VERBS_LOG_ERR,
+ VERBS_LOG_WARN,
+ VERBS_LOG_INFO,
+ VERBS_LOG_DEBUG,
+};
+
+void __verbs_log(struct verbs_context *ctx, uint32_t level,
+ const char *fmt, ...);
+
+#define verbs_log(ctx, level, format, arg...) \
+do { \
+ int tmp = errno; \
+ __verbs_log(ctx, level, "%s: %s:%d: " format, \
+ (ctx)->context.device->name, __func__, __LINE__, ##arg); \
+ errno = tmp; \
+} while (0)
+
+#define verbs_debug(ctx, format, arg...) \
+ verbs_log(ctx, VERBS_LOG_DEBUG, format, ##arg)
+
+#define verbs_info(ctx, format, arg...) \
+ verbs_log(ctx, VERBS_LOG_INFO, format, ##arg)
+
+#define verbs_warn(ctx, format, arg...) \
+ verbs_log(ctx, VERBS_LOG_WARN, format, ##arg)
+
+#define verbs_err(ctx, format, arg...) \
+ verbs_log(ctx, VERBS_LOG_ERR, format, ##arg)
+
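+/*
+ * Illustrative sketch (not upstream text): a provider reports a run-time
+ * failure through these wrappers, e.g.
+ *
+ * verbs_err(verbs_get_ctx(ibctx), "mmap failed: %d\n", errno);
+ *
+ * where verbs_get_ctx() is libibverbs' ibv_context-to-verbs_context
+ * conversion helper and ibctx is a hypothetical struct ibv_context pointer.
+ */
+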
+#ifdef VERBS_DEBUG
+#define verbs_log_datapath(ctx, level, format, arg...) \
+ verbs_log(ctx, level, format, ##arg)
+#else
+#define verbs_log_datapath(ctx, level, format, arg...) {}
+#endif
+
+#define verbs_debug_datapath(ctx, format, arg...) \
+ verbs_log_datapath(ctx, VERBS_LOG_DEBUG, format, ##arg)
+
+#define verbs_info_datapath(ctx, format, arg...) \
+ verbs_log_datapath(ctx, VERBS_LOG_INFO, format, ##arg)
+
+#define verbs_warn_datapath(ctx, format, arg...) \
+ verbs_log_datapath(ctx, VERBS_LOG_WARN, format, ##arg)
+
+#define verbs_err_datapath(ctx, format, arg...) \
+ verbs_log_datapath(ctx, VERBS_LOG_ERR, format, ##arg)
+
+enum verbs_xrcd_mask {
+ VERBS_XRCD_HANDLE = 1 << 0,
+ VERBS_XRCD_RESERVED = 1 << 1
+};
+
+enum create_cq_cmd_flags {
+ CREATE_CQ_CMD_FLAGS_TS_IGNORED_EX = 1 << 0,
+};
+
+struct verbs_xrcd {
+ struct ibv_xrcd xrcd;
+ uint32_t comp_mask;
+ uint32_t handle;
+};
+
+struct verbs_srq {
+ struct ibv_srq srq;
+ enum ibv_srq_type srq_type;
+ struct verbs_xrcd *xrcd;
+ struct ibv_cq *cq;
+ uint32_t srq_num;
+};
+
+enum verbs_qp_mask {
+ VERBS_QP_XRCD = 1 << 0,
+ VERBS_QP_EX = 1 << 1,
+};
+
+enum ibv_gid_type_sysfs {
+ IBV_GID_TYPE_SYSFS_IB_ROCE_V1,
+ IBV_GID_TYPE_SYSFS_ROCE_V2,
+};
+
+enum verbs_query_gid_attr_mask {
+ VERBS_QUERY_GID_ATTR_GID = 1 << 0,
+ VERBS_QUERY_GID_ATTR_TYPE = 1 << 1,
+ VERBS_QUERY_GID_ATTR_NDEV_IFINDEX = 1 << 2,
+};
+
+enum ibv_mr_type {
+ IBV_MR_TYPE_MR,
+ IBV_MR_TYPE_NULL_MR,
+ IBV_MR_TYPE_IMPORTED_MR,
+ IBV_MR_TYPE_DMABUF_MR,
+};
+
+struct verbs_mr {
+ struct ibv_mr ibv_mr;
+ enum ibv_mr_type mr_type;
+ int access;
+};
+
+static inline struct verbs_mr *verbs_get_mr(struct ibv_mr *mr)
+{
+ return container_of(mr, struct verbs_mr, ibv_mr);
+}
+
+struct verbs_qp {
+ union {
+ struct ibv_qp qp;
+ struct ibv_qp_ex qp_ex;
+ };
+ uint32_t comp_mask;
+ struct verbs_xrcd *xrcd;
+};
+static_assert(offsetof(struct ibv_qp_ex, qp_base) == 0, "Invalid qp layout");
+
+struct verbs_cq {
+ union {
+ struct ibv_cq cq;
+ struct ibv_cq_ex cq_ex;
+ };
+};
+
+enum ibv_flow_action_type {
+ IBV_FLOW_ACTION_UNSPECIFIED,
+ IBV_FLOW_ACTION_ESP = 1,
+};
+
+struct verbs_flow_action {
+ struct ibv_flow_action action;
+ uint32_t handle;
+ enum ibv_flow_action_type type;
+};
+
+struct verbs_dm {
+ struct ibv_dm dm;
+ uint32_t handle;
+};
+
+enum {
+ VERBS_MATCH_SENTINEL = 0,
+ VERBS_MATCH_PCI = 1,
+ VERBS_MATCH_MODALIAS = 2,
+ VERBS_MATCH_DRIVER_ID = 3,
+};
+
+struct verbs_match_ent {
+ void *driver_data;
+ union {
+ const char *modalias;
+ uint64_t driver_id;
+ } u;
+ uint16_t vendor;
+ uint16_t device;
+ uint8_t kind;
+};
+#define VERBS_DRIVER_ID(_id) \
+ { \
+ .u.driver_id = (_id), .kind = VERBS_MATCH_DRIVER_ID, \
+ }
+/* Note: New drivers should only use VERBS_DRIVER_ID, the below are for legacy
+ * drivers
+ */
+#define VERBS_PCI_MATCH(_vendor, _device, _data) \
+ { \
+ .driver_data = (void *)(_data), \
+ .vendor = (_vendor), \
+ .device = (_device), \
+ .kind = VERBS_MATCH_PCI, \
+ }
+
+#define VERBS_MODALIAS_MATCH(_mod_str, _data) \
+ { \
+ .driver_data = (void *)(_data), \
+ .u.modalias = (_mod_str), \
+ .kind = VERBS_MATCH_MODALIAS, \
+ }
+
+/* Matching on the IB device name is STRONGLY discouraged. This will only
+ * match if there is no device/modalias file available, and it will eventually
+ * be disabled entirely if the kernel supports renaming.
+ */
+#define VERBS_NAME_MATCH(_name_prefix, _data) \
+ { \
+ .driver_data = (_data), \
+ .u.modalias = "rdma_device:*N" _name_prefix "*", \
+ .kind = VERBS_MATCH_MODALIAS, \
+ }
+
+enum {
+ VSYSFS_READ_MODALIAS = 1 << 0,
+ VSYSFS_READ_NODE_GUID = 1 << 1,
+};
+
+/* An rdma device detected in sysfs */
+struct verbs_sysfs_dev {
+ struct list_node entry;
+ void *provider_data;
+ const struct verbs_match_ent *match;
+ unsigned int flags;
+ char sysfs_name[IBV_SYSFS_NAME_MAX];
+ dev_t sysfs_cdev;
+ char ibdev_name[IBV_SYSFS_NAME_MAX];
+ char ibdev_path[IBV_SYSFS_PATH_MAX];
+ char modalias[512];
+ uint64_t node_guid;
+ uint32_t driver_id;
+ enum ibv_node_type node_type;
+ int ibdev_idx;
+ uint32_t num_ports;
+ uint32_t abi_ver;
+ struct timespec time_created;
+};
+
+/* Must change the PRIVATE IBVERBS_PRIVATE_ symbol if this is changed */
+struct verbs_device_ops {
+ const char *name;
+
+ uint32_t match_min_abi_version;
+ uint32_t match_max_abi_version;
+ const struct verbs_match_ent *match_table;
+ const struct verbs_device_ops **static_providers;
+
+ bool (*match_device)(struct verbs_sysfs_dev *sysfs_dev);
+
+ struct verbs_context *(*alloc_context)(struct ibv_device *device,
+ int cmd_fd,
+ void *private_data);
+ struct verbs_context *(*import_context)(struct ibv_device *device,
+ int cmd_fd);
+
+ struct verbs_device *(*alloc_device)(struct verbs_sysfs_dev *sysfs_dev);
+ void (*uninit_device)(struct verbs_device *device);
+};
+
+/* Must change the PRIVATE IBVERBS_PRIVATE_ symbol if this is changed */
+struct verbs_device {
+ struct ibv_device device; /* Must be first */
+ const struct verbs_device_ops *ops;
+ atomic_int refcount;
+ struct list_node entry;
+ struct verbs_sysfs_dev *sysfs;
+ uint64_t core_support;
+};
+
+struct verbs_counters {
+ struct ibv_counters counters;
+ uint32_t handle;
+};
+
+/*
+ * Must change the PRIVATE IBVERBS_PRIVATE_ symbol if this is changed. This is
+ * the union of every op the driver can support. If new elements are added to
+ * this structure then verbs_dummy_ops must also be updated.
+ *
+ * Keep sorted.
+ */
+struct verbs_context_ops {
+ int (*advise_mr)(struct ibv_pd *pd,
+ enum ibv_advise_mr_advice advice,
+ uint32_t flags,
+ struct ibv_sge *sg_list,
+ uint32_t num_sges);
+ struct ibv_dm *(*alloc_dm)(struct ibv_context *context,
+ struct ibv_alloc_dm_attr *attr);
+ struct ibv_mw *(*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
+ struct ibv_mr *(*alloc_null_mr)(struct ibv_pd *pd);
+ struct ibv_pd *(*alloc_parent_domain)(
+ struct ibv_context *context,
+ struct ibv_parent_domain_init_attr *attr);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ struct ibv_td *(*alloc_td)(struct ibv_context *context,
+ struct ibv_td_init_attr *init_attr);
+ void (*async_event)(struct ibv_context *context, struct ibv_async_event *event);
+ int (*attach_counters_point_flow)(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow);
+ int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+ int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
+ struct ibv_mw_bind *mw_bind);
+ int (*close_xrcd)(struct ibv_xrcd *xrcd);
+ void (*cq_event)(struct ibv_cq *cq);
+ struct ibv_ah *(*create_ah)(struct ibv_pd *pd,
+ struct ibv_ah_attr *attr);
+ struct ibv_counters *(*create_counters)(struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ struct ibv_cq_ex *(*create_cq_ex)(
+ struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *init_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow_attr);
+ struct ibv_flow_action *(*create_flow_action_esp)(struct ibv_context *context,
+ struct ibv_flow_action_esp_attr *attr);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *attr);
+ struct ibv_qp *(*create_qp_ex)(
+ struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)(
+ struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ struct ibv_srq *(*create_srq)(struct ibv_pd *pd,
+ struct ibv_srq_init_attr *srq_init_attr);
+ struct ibv_srq *(*create_srq_ex)(
+ struct ibv_context *context,
+ struct ibv_srq_init_attr_ex *srq_init_attr_ex);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*dealloc_mw)(struct ibv_mw *mw);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ int (*dealloc_td)(struct ibv_td *td);
+ int (*dereg_mr)(struct verbs_mr *vmr);
+ int (*destroy_ah)(struct ibv_ah *ah);
+ int (*destroy_counters)(struct ibv_counters *counters);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*destroy_flow)(struct ibv_flow *flow);
+ int (*destroy_flow_action)(struct ibv_flow_action *action);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ int (*destroy_srq)(struct ibv_srq *srq);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+ void (*free_context)(struct ibv_context *context);
+ int (*free_dm)(struct ibv_dm *dm);
+ int (*get_srq_num)(struct ibv_srq *srq, uint32_t *srq_num);
+ struct ibv_dm *(*import_dm)(struct ibv_context *context,
+ uint32_t dm_handle);
+ struct ibv_mr *(*import_mr)(struct ibv_pd *pd,
+ uint32_t mr_handle);
+ struct ibv_pd *(*import_pd)(struct ibv_context *context,
+ uint32_t pd_handle);
+ int (*modify_cq)(struct ibv_cq *cq, struct ibv_modify_cq_attr *attr);
+ int (*modify_flow_action_esp)(struct ibv_flow_action *action,
+ struct ibv_flow_action_esp_attr *attr);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ int (*modify_qp_rate_limit)(struct ibv_qp *qp,
+ struct ibv_qp_rate_limit_attr *attr);
+ int (*modify_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_qp *(*open_qp)(struct ibv_context *context,
+ struct ibv_qp_open_attr *attr);
+ struct ibv_xrcd *(*open_xrcd)(
+ struct ibv_context *context,
+ struct ibv_xrcd_init_attr *xrcd_init_attr);
+ int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
+ int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr);
+ int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
+ struct ibv_send_wr **bad_wr);
+ int (*post_srq_ops)(struct ibv_srq *srq, struct ibv_ops_wr *op,
+ struct ibv_ops_wr **bad_op);
+ int (*post_srq_recv)(struct ibv_srq *srq, struct ibv_recv_wr *recv_wr,
+ struct ibv_recv_wr **bad_recv_wr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr,
+ size_t attr_size);
+ int (*query_ece)(struct ibv_qp *qp, struct ibv_ece *ece);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr);
+ int (*query_qp_data_in_order)(struct ibv_qp *qp, enum ibv_wr_opcode op,
+ uint32_t flags);
+ int (*query_rt_values)(struct ibv_context *context,
+ struct ibv_values_ex *values);
+ int (*query_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);
+ int (*read_counters)(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags);
+ struct ibv_mr *(*reg_dm_mr)(struct ibv_pd *pd, struct ibv_dm *dm,
+ uint64_t dm_offset, size_t length,
+ unsigned int access);
+ struct ibv_mr *(*reg_dmabuf_mr)(struct ibv_pd *pd, uint64_t offset,
+ size_t length, uint64_t iova,
+ int fd, int access);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
+ uint64_t hca_va, int access);
+ int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
+ int (*rereg_mr)(struct verbs_mr *vmr, int flags, struct ibv_pd *pd,
+ void *addr, size_t length, int access);
+ int (*resize_cq)(struct ibv_cq *cq, int cqe);
+ int (*set_ece)(struct ibv_qp *qp, struct ibv_ece *ece);
+ void (*unimport_dm)(struct ibv_dm *dm);
+ void (*unimport_mr)(struct ibv_mr *mr);
+ void (*unimport_pd)(struct ibv_pd *pd);
+};
+
+static inline struct verbs_device *
+verbs_get_device(const struct ibv_device *dev)
+{
+ return container_of(dev, struct verbs_device, device);
+}
+
+typedef struct verbs_device *(*verbs_driver_init_func)(const char *uverbs_sys_path,
+ int abi_version);
+
+/* Wire the IBVERBS_PRIVATE version number into the verbs_register_driver
+ * symbol name. This guarentees we link to the correct set of symbols even if
+ * statically linking or using a dynmic linker with symbol versioning turned
+ * off.
+ */
+#define ___make_verbs_register_driver(x) verbs_register_driver_ ## x
+#define __make_verbs_register_driver(x) ___make_verbs_register_driver(x)
+#define verbs_register_driver __make_verbs_register_driver(IBVERBS_PABI_VERSION)
+
+void verbs_register_driver(const struct verbs_device_ops *ops);
+
+/*
+ * Macro for providers to use to supply verbs_device_ops to the core code.
+ * This creates a global symbol for the provider structure to be used by the
+ * ibv_static_providers() machinery, and a global constructor for the dlopen
+ * machinery.
+ */
+#define PROVIDER_DRIVER(provider_name, drv_struct) \
+ extern const struct verbs_device_ops verbs_provider_##provider_name \
+ __attribute__((alias(stringify(drv_struct)))); \
+ static __attribute__((constructor)) void drv##__register_driver(void) \
+ { \
+ verbs_register_driver(&drv_struct); \
+ }
+
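+/*
+ * Example (illustrative sketch; the provider "foo" and its symbols are
+ * hypothetical):
+ *
+ * static const struct verbs_device_ops foo_dev_ops = {
+ * .name = "foo",
+ * .match_min_abi_version = 1,
+ * .match_max_abi_version = 1,
+ * .match_table = foo_match_table,
+ * .alloc_device = foo_device_alloc,
+ * .uninit_device = foo_device_free,
+ * .alloc_context = foo_alloc_context,
+ * };
+ * PROVIDER_DRIVER(foo, foo_dev_ops);
+ */
+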
+void *_verbs_init_and_alloc_context(struct ibv_device *device, int cmd_fd,
+ size_t alloc_size,
+ struct verbs_context *context_offset,
+ uint32_t driver_id);
+
+#define verbs_init_and_alloc_context(ibdev, cmd_fd, drv_ctx_ptr, ctx_memb, \
+ driver_id) \
+ ((typeof(drv_ctx_ptr))_verbs_init_and_alloc_context( \
+ ibdev, cmd_fd, sizeof(*drv_ctx_ptr), \
+ &((typeof(drv_ctx_ptr))NULL)->ctx_memb, (driver_id)))
+
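+/*
+ * Typical use from a provider's alloc_context callback (illustrative
+ * sketch; struct foo_context, its embedded "ibv_ctx" verbs_context member
+ * and RDMA_DRIVER_FOO are hypothetical):
+ *
+ * struct foo_context *ctx;
+ *
+ * ctx = verbs_init_and_alloc_context(ibdev, cmd_fd, ctx, ibv_ctx,
+ * RDMA_DRIVER_FOO);
+ * if (!ctx)
+ * return NULL;
+ */
+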
+int verbs_init_context(struct verbs_context *context_ex,
+ struct ibv_device *device, int cmd_fd,
+ uint32_t driver_id);
+void verbs_uninit_context(struct verbs_context *context);
+void verbs_set_ops(struct verbs_context *vctx,
+ const struct verbs_context_ops *ops);
+
+void verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
+ struct ibv_comp_channel *channel,
+ void *cq_context);
+
+struct ibv_context *verbs_open_device(struct ibv_device *device,
+ void *private_data);
+int ibv_cmd_get_context(struct verbs_context *context,
+ struct ibv_get_context *cmd, size_t cmd_size,
+ struct ib_uverbs_get_context_resp *resp, size_t resp_size);
+int ibv_cmd_query_context(struct ibv_context *ctx,
+ struct ibv_command_buffer *driver);
+int ibv_cmd_create_flow_action_esp(struct ibv_context *ctx,
+ struct ibv_flow_action_esp_attr *attr,
+ struct verbs_flow_action *flow_action,
+ struct ibv_command_buffer *driver);
+int ibv_cmd_modify_flow_action_esp(struct verbs_flow_action *flow_action,
+ struct ibv_flow_action_esp_attr *attr,
+ struct ibv_command_buffer *driver);
+int ibv_cmd_query_device_any(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr, size_t attr_size,
+ struct ib_uverbs_ex_query_device_resp *resp,
+ size_t *resp_size);
+int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr,
+ struct ibv_query_port *cmd, size_t cmd_size);
+int ibv_cmd_alloc_async_fd(struct ibv_context *context);
+int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
+ struct ibv_alloc_pd *cmd, size_t cmd_size,
+ struct ib_uverbs_alloc_pd_resp *resp, size_t resp_size);
+int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
+int ibv_cmd_open_xrcd(struct ibv_context *context, struct verbs_xrcd *xrcd,
+ int vxrcd_size,
+ struct ibv_xrcd_init_attr *attr,
+ struct ibv_open_xrcd *cmd, size_t cmd_size,
+ struct ib_uverbs_open_xrcd_resp *resp, size_t resp_size);
+int ibv_cmd_close_xrcd(struct verbs_xrcd *xrcd);
+int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+ uint64_t hca_va, int access,
+ struct verbs_mr *vmr, struct ibv_reg_mr *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_reg_mr_resp *resp, size_t resp_size);
+int ibv_cmd_rereg_mr(struct verbs_mr *vmr, uint32_t flags, void *addr,
+ size_t length, uint64_t hca_va, int access,
+ struct ibv_pd *pd, struct ibv_rereg_mr *cmd,
+ size_t cmd_sz, struct ib_uverbs_rereg_mr_resp *resp,
+ size_t resp_sz);
+int ibv_cmd_dereg_mr(struct verbs_mr *vmr);
+int ibv_cmd_query_mr(struct ibv_pd *pd, struct verbs_mr *vmr,
+ uint32_t mr_handle);
+int ibv_cmd_advise_mr(struct ibv_pd *pd,
+ enum ibv_advise_mr_advice advice,
+ uint32_t flags,
+ struct ibv_sge *sg_list,
+ uint32_t num_sge);
+int ibv_cmd_reg_dmabuf_mr(struct ibv_pd *pd, uint64_t offset, size_t length,
+ uint64_t iova, int fd, int access,
+ struct verbs_mr *vmr);
+int ibv_cmd_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type,
+ struct ibv_mw *mw, struct ibv_alloc_mw *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_alloc_mw_resp *resp, size_t resp_size);
+int ibv_cmd_dealloc_mw(struct ibv_mw *mw);
+int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
+ struct ibv_comp_channel *channel,
+ int comp_vector, struct ibv_cq *cq,
+ struct ibv_create_cq *cmd, size_t cmd_size,
+ struct ib_uverbs_create_cq_resp *resp, size_t resp_size);
+int ibv_cmd_create_cq_ex(struct ibv_context *context,
+ const struct ibv_cq_init_attr_ex *cq_attr,
+ struct verbs_cq *cq,
+ struct ibv_create_cq_ex *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_ex_create_cq_resp *resp,
+ size_t resp_size,
+ uint32_t cmd_flags);
+int ibv_cmd_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+int ibv_cmd_req_notify_cq(struct ibv_cq *cq, int solicited_only);
+int ibv_cmd_resize_cq(struct ibv_cq *cq, int cqe,
+ struct ibv_resize_cq *cmd, size_t cmd_size,
+ struct ib_uverbs_resize_cq_resp *resp, size_t resp_size);
+int ibv_cmd_destroy_cq(struct ibv_cq *cq);
+int ibv_cmd_modify_cq(struct ibv_cq *cq,
+ struct ibv_modify_cq_attr *attr,
+ struct ibv_modify_cq *cmd,
+ size_t cmd_size);
+
+int ibv_cmd_create_srq(struct ibv_pd *pd,
+ struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
+ struct ibv_create_srq *cmd, size_t cmd_size,
+ struct ib_uverbs_create_srq_resp *resp, size_t resp_size);
+int ibv_cmd_create_srq_ex(struct ibv_context *context,
+ struct verbs_srq *srq,
+ struct ibv_srq_init_attr_ex *attr_ex,
+ struct ibv_create_xsrq *cmd, size_t cmd_size,
+ struct ib_uverbs_create_srq_resp *resp, size_t resp_size);
+int ibv_cmd_modify_srq(struct ibv_srq *srq,
+ struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask,
+ struct ibv_modify_srq *cmd, size_t cmd_size);
+int ibv_cmd_query_srq(struct ibv_srq *srq,
+ struct ibv_srq_attr *srq_attr,
+ struct ibv_query_srq *cmd, size_t cmd_size);
+int ibv_cmd_destroy_srq(struct ibv_srq *srq);
+
+int ibv_cmd_create_qp(struct ibv_pd *pd,
+ struct ibv_qp *qp, struct ibv_qp_init_attr *attr,
+ struct ibv_create_qp *cmd, size_t cmd_size,
+ struct ib_uverbs_create_qp_resp *resp, size_t resp_size);
+int ibv_cmd_create_qp_ex(struct ibv_context *context,
+ struct verbs_qp *qp,
+ struct ibv_qp_init_attr_ex *attr_ex,
+ struct ibv_create_qp *cmd, size_t cmd_size,
+ struct ib_uverbs_create_qp_resp *resp, size_t resp_size);
+int ibv_cmd_create_qp_ex2(struct ibv_context *context,
+ struct verbs_qp *qp,
+ struct ibv_qp_init_attr_ex *qp_attr,
+ struct ibv_create_qp_ex *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_ex_create_qp_resp *resp,
+ size_t resp_size);
+int ibv_cmd_open_qp(struct ibv_context *context,
+ struct verbs_qp *qp, int vqp_sz,
+ struct ibv_qp_open_attr *attr,
+ struct ibv_open_qp *cmd, size_t cmd_size,
+ struct ib_uverbs_create_qp_resp *resp, size_t resp_size);
+int ibv_cmd_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *qp_attr,
+ int attr_mask,
+ struct ibv_qp_init_attr *qp_init_attr,
+ struct ibv_query_qp *cmd, size_t cmd_size);
+int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask,
+ struct ibv_modify_qp *cmd, size_t cmd_size);
+int ibv_cmd_modify_qp_ex(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_modify_qp_ex *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_ex_modify_qp_resp *resp,
+ size_t resp_size);
+int ibv_cmd_destroy_qp(struct ibv_qp *qp);
+int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
+ struct ibv_send_wr **bad_wr);
+int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr);
+int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr);
+int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
+ struct ibv_ah_attr *attr,
+ struct ib_uverbs_create_ah_resp *resp,
+ size_t resp_size);
+int ibv_cmd_destroy_ah(struct ibv_ah *ah);
+int ibv_cmd_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);
+int ibv_cmd_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);
+
+int ibv_cmd_create_flow(struct ibv_qp *qp,
+ struct ibv_flow *flow_id,
+ struct ibv_flow_attr *flow_attr,
+ void *ucmd,
+ size_t ucmd_size);
+int ibv_cmd_destroy_flow(struct ibv_flow *flow_id);
+int ibv_cmd_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr,
+ struct ibv_wq *wq,
+ struct ibv_create_wq *cmd,
+ size_t cmd_size,
+ struct ib_uverbs_ex_create_wq_resp *resp,
+ size_t resp_size);
+
+int ibv_cmd_destroy_flow_action(struct verbs_flow_action *action);
+int ibv_cmd_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *attr,
+ struct ibv_modify_wq *cmd, size_t cmd_size);
+int ibv_cmd_destroy_wq(struct ibv_wq *wq);
+int ibv_cmd_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr,
+ struct ibv_rwq_ind_table *rwq_ind_table,
+ struct ib_uverbs_ex_create_rwq_ind_table_resp *resp,
+ size_t resp_size);
+int ibv_cmd_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table);
+int ibv_cmd_create_counters(struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr,
+ struct verbs_counters *vcounters,
+ struct ibv_command_buffer *link);
+int ibv_cmd_destroy_counters(struct verbs_counters *vcounters);
+int ibv_cmd_read_counters(struct verbs_counters *vcounters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags,
+ struct ibv_command_buffer *link);
+int ibv_dontfork_range(void *base, size_t size);
+int ibv_dofork_range(void *base, size_t size);
+int ibv_cmd_alloc_dm(struct ibv_context *ctx,
+ const struct ibv_alloc_dm_attr *dm_attr,
+ struct verbs_dm *dm,
+ struct ibv_command_buffer *link);
+int ibv_cmd_free_dm(struct verbs_dm *dm);
+int ibv_cmd_reg_dm_mr(struct ibv_pd *pd, struct verbs_dm *dm,
+ uint64_t offset, size_t length,
+ unsigned int access, struct verbs_mr *vmr,
+ struct ibv_command_buffer *link);
+
+int __ibv_query_gid_ex(struct ibv_context *context, uint32_t port_num,
+ uint32_t gid_index, struct ibv_gid_entry *entry,
+ uint32_t flags, size_t entry_size,
+ uint32_t fallback_attr_mask);
+
+/*
+ * sysfs helper functions
+ */
+const char *ibv_get_sysfs_path(void);
+
+int ibv_read_sysfs_file(const char *dir, const char *file,
+ char *buf, size_t size);
+int ibv_read_sysfs_file_at(int dirfd, const char *file, char *buf, size_t size);
+int ibv_read_ibdev_sysfs_file(char *buf, size_t size,
+ struct verbs_sysfs_dev *sysfs_dev,
+ const char *fnfmt, ...)
+ __attribute__((format(printf, 4, 5)));
+
+static inline bool check_comp_mask(uint64_t input, uint64_t supported)
+{
+ return (input & ~supported) == 0;
+}
+
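+/*
+ * Illustrative sketch: providers use check_comp_mask() to reject requests
+ * that set comp_mask bits they do not implement (SUPPORTED_COMP_MASK is a
+ * placeholder):
+ *
+ * if (!check_comp_mask(attr->comp_mask, SUPPORTED_COMP_MASK)) {
+ * errno = EOPNOTSUPP;
+ * return NULL;
+ * }
+ */
+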
+int ibv_query_gid_type(struct ibv_context *context, uint8_t port_num,
+ unsigned int index, enum ibv_gid_type_sysfs *type);
+
+static inline int
+ibv_check_alloc_parent_domain(struct ibv_parent_domain_init_attr *attr)
+{
+ /* A valid protection domain must be set */
+ if (!attr->pd) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize the ibv_pd which is being used as a parent_domain. From the
+ * perspective of the core code the new ibv_pd is completely interchangeable
+ * with the passed contained_pd.
+ */
+static inline void ibv_initialize_parent_domain(struct ibv_pd *parent_domain,
+ struct ibv_pd *contained_pd)
+{
+ parent_domain->context = contained_pd->context;
+ parent_domain->handle = contained_pd->handle;
+}
+
+#endif /* INFINIBAND_DRIVER_H */
diff --git a/src/rc-compat/v37/ib_user_verbs.h b/src/rc-compat/v37/ib_user_verbs.h
new file mode 100644
index 000000000000..7ee73a0652f1
--- /dev/null
+++ b/src/rc-compat/v37/ib_user_verbs.h
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_VERBS_H
+#define IB_USER_VERBS_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define IB_USER_VERBS_ABI_VERSION 6
+#define IB_USER_VERBS_CMD_THRESHOLD 50
+
+enum ib_uverbs_write_cmds {
+ IB_USER_VERBS_CMD_GET_CONTEXT,
+ IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_CMD_QUERY_PORT,
+ IB_USER_VERBS_CMD_ALLOC_PD,
+ IB_USER_VERBS_CMD_DEALLOC_PD,
+ IB_USER_VERBS_CMD_CREATE_AH,
+ IB_USER_VERBS_CMD_MODIFY_AH,
+ IB_USER_VERBS_CMD_QUERY_AH,
+ IB_USER_VERBS_CMD_DESTROY_AH,
+ IB_USER_VERBS_CMD_REG_MR,
+ IB_USER_VERBS_CMD_REG_SMR,
+ IB_USER_VERBS_CMD_REREG_MR,
+ IB_USER_VERBS_CMD_QUERY_MR,
+ IB_USER_VERBS_CMD_DEREG_MR,
+ IB_USER_VERBS_CMD_ALLOC_MW,
+ IB_USER_VERBS_CMD_BIND_MW,
+ IB_USER_VERBS_CMD_DEALLOC_MW,
+ IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
+ IB_USER_VERBS_CMD_CREATE_CQ,
+ IB_USER_VERBS_CMD_RESIZE_CQ,
+ IB_USER_VERBS_CMD_DESTROY_CQ,
+ IB_USER_VERBS_CMD_POLL_CQ,
+ IB_USER_VERBS_CMD_PEEK_CQ,
+ IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
+ IB_USER_VERBS_CMD_CREATE_QP,
+ IB_USER_VERBS_CMD_QUERY_QP,
+ IB_USER_VERBS_CMD_MODIFY_QP,
+ IB_USER_VERBS_CMD_DESTROY_QP,
+ IB_USER_VERBS_CMD_POST_SEND,
+ IB_USER_VERBS_CMD_POST_RECV,
+ IB_USER_VERBS_CMD_ATTACH_MCAST,
+ IB_USER_VERBS_CMD_DETACH_MCAST,
+ IB_USER_VERBS_CMD_CREATE_SRQ,
+ IB_USER_VERBS_CMD_MODIFY_SRQ,
+ IB_USER_VERBS_CMD_QUERY_SRQ,
+ IB_USER_VERBS_CMD_DESTROY_SRQ,
+ IB_USER_VERBS_CMD_POST_SRQ_RECV,
+ IB_USER_VERBS_CMD_OPEN_XRCD,
+ IB_USER_VERBS_CMD_CLOSE_XRCD,
+ IB_USER_VERBS_CMD_CREATE_XSRQ,
+ IB_USER_VERBS_CMD_OPEN_QP,
+};
+
+enum {
+ IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
+ IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
+ IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
+ IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
+ IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
+ IB_USER_VERBS_EX_CMD_CREATE_WQ,
+ IB_USER_VERBS_EX_CMD_MODIFY_WQ,
+ IB_USER_VERBS_EX_CMD_DESTROY_WQ,
+ IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
+ IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
+ IB_USER_VERBS_EX_CMD_MODIFY_CQ
+};
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * Specifically:
+ * - Do not use pointer types -- pass pointers in __u64 instead.
+ * - Make sure that any structure larger than 4 bytes is padded to a
+ * multiple of 8 bytes. Otherwise the structure size will be
+ * different between 32-bit and 64-bit architectures.
+ */
+
+struct ib_uverbs_async_event_desc {
+ __aligned_u64 element;
+ __u32 event_type; /* enum ib_event_type */
+ __u32 reserved;
+};
+
+struct ib_uverbs_comp_event_desc {
+ __aligned_u64 cq_handle;
+};
+
+struct ib_uverbs_cq_moderation_caps {
+ __u16 max_cq_moderation_count;
+ __u16 max_cq_moderation_period;
+ __u32 reserved;
+};
+
+/*
+ * All commands from userspace should start with a __u32 command field
+ * followed by __u16 in_words and out_words fields (which give the
+ * length of the command block and response buffer if any in 32-bit
+ * words). The kernel driver will read these fields first and read
+ * the rest of the command struct based on these values.
+ */
+
+#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
+#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u
+
+struct ib_uverbs_cmd_hdr {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+};
+
+struct ib_uverbs_ex_cmd_hdr {
+ __aligned_u64 response;
+ __u16 provider_in_words;
+ __u16 provider_out_words;
+ __u32 cmd_hdr_reserved;
+};
+
+struct ib_uverbs_get_context {
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_get_context_resp {
+ __u32 async_fd;
+ __u32 num_comp_vectors;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_device {
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_device_resp {
+ __aligned_u64 fw_ver;
+ __be64 node_guid;
+ __be64 sys_image_guid;
+ __aligned_u64 max_mr_size;
+ __aligned_u64 page_size_cap;
+ __u32 vendor_id;
+ __u32 vendor_part_id;
+ __u32 hw_ver;
+ __u32 max_qp;
+ __u32 max_qp_wr;
+ __u32 device_cap_flags;
+ __u32 max_sge;
+ __u32 max_sge_rd;
+ __u32 max_cq;
+ __u32 max_cqe;
+ __u32 max_mr;
+ __u32 max_pd;
+ __u32 max_qp_rd_atom;
+ __u32 max_ee_rd_atom;
+ __u32 max_res_rd_atom;
+ __u32 max_qp_init_rd_atom;
+ __u32 max_ee_init_rd_atom;
+ __u32 atomic_cap;
+ __u32 max_ee;
+ __u32 max_rdd;
+ __u32 max_mw;
+ __u32 max_raw_ipv6_qp;
+ __u32 max_raw_ethy_qp;
+ __u32 max_mcast_grp;
+ __u32 max_mcast_qp_attach;
+ __u32 max_total_mcast_qp_attach;
+ __u32 max_ah;
+ __u32 max_fmr;
+ __u32 max_map_per_fmr;
+ __u32 max_srq;
+ __u32 max_srq_wr;
+ __u32 max_srq_sge;
+ __u16 max_pkeys;
+ __u8 local_ca_ack_delay;
+ __u8 phys_port_cnt;
+ __u8 reserved[4];
+};
+
+struct ib_uverbs_ex_query_device {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+ __aligned_u64 general_caps;
+ struct {
+ __u32 rc_odp_caps;
+ __u32 uc_odp_caps;
+ __u32 ud_odp_caps;
+ } per_transport_caps;
+ __u32 reserved;
+};
+
+struct ib_uverbs_rss_caps {
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+ __u32 max_rwq_indirection_tables;
+ __u32 max_rwq_indirection_table_size;
+ __u32 reserved;
+};
+
+struct ib_uverbs_tm_caps {
+ /* Max size of rendezvous request message */
+ __u32 max_rndv_hdr_size;
+ /* Max number of entries in tag matching list */
+ __u32 max_num_tags;
+ /* TM flags */
+ __u32 flags;
+ /* Max number of outstanding list operations */
+ __u32 max_ops;
+ /* Max number of SGE in tag matching entry */
+ __u32 max_sge;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+ struct ib_uverbs_query_device_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+ struct ib_uverbs_odp_caps odp_caps;
+ __aligned_u64 timestamp_mask;
+ __aligned_u64 hca_core_clock; /* in KHZ */
+ __aligned_u64 device_cap_flags_ex;
+ struct ib_uverbs_rss_caps rss_caps;
+ __u32 max_wq_type_rq;
+ __u32 raw_packet_caps;
+ struct ib_uverbs_tm_caps tm_caps;
+ struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
+ __aligned_u64 max_dm_size;
+ __u32 xrc_odp_caps;
+ __u32 reserved;
+};
+
+struct ib_uverbs_query_port {
+ __aligned_u64 response;
+ __u8 port_num;
+ __u8 reserved[7];
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_port_resp {
+ __u32 port_cap_flags; /* see ib_uverbs_query_port_cap_flags */
+ __u32 max_msg_sz;
+ __u32 bad_pkey_cntr;
+ __u32 qkey_viol_cntr;
+ __u32 gid_tbl_len;
+ __u16 pkey_tbl_len;
+ __u16 lid;
+ __u16 sm_lid;
+ __u8 state;
+ __u8 max_mtu;
+ __u8 active_mtu;
+ __u8 lmc;
+ __u8 max_vl_num;
+ __u8 sm_sl;
+ __u8 subnet_timeout;
+ __u8 init_type_reply;
+ __u8 active_width;
+ __u8 active_speed;
+ __u8 phys_state;
+ __u8 link_layer;
+ __u8 flags; /* see ib_uverbs_query_port_flags */
+ __u8 reserved;
+};
+
+struct ib_uverbs_alloc_pd {
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_alloc_pd_resp {
+ __u32 pd_handle;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_dealloc_pd {
+ __u32 pd_handle;
+};
+
+struct ib_uverbs_open_xrcd {
+ __aligned_u64 response;
+ __u32 fd;
+ __u32 oflags;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_open_xrcd_resp {
+ __u32 xrcd_handle;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_close_xrcd {
+ __u32 xrcd_handle;
+};
+
+struct ib_uverbs_reg_mr {
+ __aligned_u64 response;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
+ __u32 pd_handle;
+ __u32 access_flags;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_reg_mr_resp {
+ __u32 mr_handle;
+ __u32 lkey;
+ __u32 rkey;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_rereg_mr {
+ __aligned_u64 response;
+ __u32 mr_handle;
+ __u32 flags;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
+ __u32 pd_handle;
+ __u32 access_flags;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_rereg_mr_resp {
+ __u32 lkey;
+ __u32 rkey;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_dereg_mr {
+ __u32 mr_handle;
+};
+
+struct ib_uverbs_alloc_mw {
+ __aligned_u64 response;
+ __u32 pd_handle;
+ __u8 mw_type;
+ __u8 reserved[3];
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_alloc_mw_resp {
+ __u32 mw_handle;
+ __u32 rkey;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_dealloc_mw {
+ __u32 mw_handle;
+};
+
+struct ib_uverbs_create_comp_channel {
+ __aligned_u64 response;
+};
+
+struct ib_uverbs_create_comp_channel_resp {
+ __u32 fd;
+};
+
+struct ib_uverbs_create_cq {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
+};
+
+struct ib_uverbs_ex_create_cq {
+ __aligned_u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 comp_mask;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
+ __u32 reserved;
+};
+
+struct ib_uverbs_create_cq_resp {
+ __u32 cq_handle;
+ __u32 cqe;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_ex_create_cq_resp {
+ struct ib_uverbs_create_cq_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
+struct ib_uverbs_resize_cq {
+ __aligned_u64 response;
+ __u32 cq_handle;
+ __u32 cqe;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_resize_cq_resp {
+ __u32 cqe;
+ __u32 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_poll_cq {
+ __aligned_u64 response;
+ __u32 cq_handle;
+ __u32 ne;
+};
+
+enum ib_uverbs_wc_opcode {
+ IB_UVERBS_WC_SEND = 0,
+ IB_UVERBS_WC_RDMA_WRITE = 1,
+ IB_UVERBS_WC_RDMA_READ = 2,
+ IB_UVERBS_WC_COMP_SWAP = 3,
+ IB_UVERBS_WC_FETCH_ADD = 4,
+ IB_UVERBS_WC_BIND_MW = 5,
+ IB_UVERBS_WC_LOCAL_INV = 6,
+ IB_UVERBS_WC_TSO = 7,
+};
+
+struct ib_uverbs_wc {
+ __aligned_u64 wr_id;
+ __u32 status;
+ __u32 opcode;
+ __u32 vendor_err;
+ __u32 byte_len;
+ union {
+ __be32 imm_data;
+ __u32 invalidate_rkey;
+ } ex;
+ __u32 qp_num;
+ __u32 src_qp;
+ __u32 wc_flags;
+ __u16 pkey_index;
+ __u16 slid;
+ __u8 sl;
+ __u8 dlid_path_bits;
+ __u8 port_num;
+ __u8 reserved;
+};
+
+struct ib_uverbs_poll_cq_resp {
+ __u32 count;
+ __u32 reserved;
+ struct ib_uverbs_wc wc[0];
+};
+
+struct ib_uverbs_req_notify_cq {
+ __u32 cq_handle;
+ __u32 solicited_only;
+};
+
+struct ib_uverbs_destroy_cq {
+ __aligned_u64 response;
+ __u32 cq_handle;
+ __u32 reserved;
+};
+
+struct ib_uverbs_destroy_cq_resp {
+ __u32 comp_events_reported;
+ __u32 async_events_reported;
+};
+
+struct ib_uverbs_global_route {
+ __u8 dgid[16];
+ __u32 flow_label;
+ __u8 sgid_index;
+ __u8 hop_limit;
+ __u8 traffic_class;
+ __u8 reserved;
+};
+
+struct ib_uverbs_ah_attr {
+ struct ib_uverbs_global_route grh;
+ __u16 dlid;
+ __u8 sl;
+ __u8 src_path_bits;
+ __u8 static_rate;
+ __u8 is_global;
+ __u8 port_num;
+ __u8 reserved;
+};
+
+struct ib_uverbs_qp_attr {
+ __u32 qp_attr_mask;
+ __u32 qp_state;
+ __u32 cur_qp_state;
+ __u32 path_mtu;
+ __u32 path_mig_state;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+
+ struct ib_uverbs_ah_attr ah_attr;
+ struct ib_uverbs_ah_attr alt_ah_attr;
+
+ /* ib_qp_cap */
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 en_sqd_async_notify;
+ __u8 sq_draining;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 reserved[5];
+};
+
+struct ib_uverbs_create_qp {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 send_cq_handle;
+ __u32 recv_cq_handle;
+ __u32 srq_handle;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u8 sq_sig_all;
+ __u8 qp_type;
+ __u8 is_srq;
+ __u8 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+enum ib_uverbs_create_qp_mask {
+ IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1UL << 0,
+};
+
+enum {
+ IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
+};
+
+struct ib_uverbs_ex_create_qp {
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 send_cq_handle;
+ __u32 recv_cq_handle;
+ __u32 srq_handle;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u8 sq_sig_all;
+ __u8 qp_type;
+ __u8 is_srq;
+ __u8 reserved;
+ __u32 comp_mask;
+ __u32 create_flags;
+ __u32 rwq_ind_tbl_handle;
+ __u32 source_qpn;
+};
+
+struct ib_uverbs_open_qp {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 qpn;
+ __u8 qp_type;
+ __u8 reserved[7];
+ __aligned_u64 driver_data[0];
+};
+
+/* also used for open response */
+struct ib_uverbs_create_qp_resp {
+ __u32 qp_handle;
+ __u32 qpn;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u32 reserved;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_ex_create_qp_resp {
+ struct ib_uverbs_create_qp_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
+/*
+ * This struct needs to remain a multiple of 8 bytes to keep the
+ * alignment of the modify QP parameters.
+ */
+struct ib_uverbs_qp_dest {
+ __u8 dgid[16];
+ __u32 flow_label;
+ __u16 dlid;
+ __u16 reserved;
+ __u8 sgid_index;
+ __u8 hop_limit;
+ __u8 traffic_class;
+ __u8 sl;
+ __u8 src_path_bits;
+ __u8 static_rate;
+ __u8 is_global;
+ __u8 port_num;
+};
+
+struct ib_uverbs_query_qp {
+ __aligned_u64 response;
+ __u32 qp_handle;
+ __u32 attr_mask;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_qp_resp {
+ struct ib_uverbs_qp_dest dest;
+ struct ib_uverbs_qp_dest alt_dest;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 sq_draining;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 sq_sig_all;
+ __u8 reserved[5];
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_modify_qp {
+ struct ib_uverbs_qp_dest dest;
+ struct ib_uverbs_qp_dest alt_dest;
+ __u32 qp_handle;
+ __u32 attr_mask;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 en_sqd_async_notify;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 reserved[2];
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_ex_modify_qp {
+ struct ib_uverbs_modify_qp base;
+ __u32 rate_limit;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_modify_qp_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
+struct ib_uverbs_destroy_qp {
+ __aligned_u64 response;
+ __u32 qp_handle;
+ __u32 reserved;
+};
+
+struct ib_uverbs_destroy_qp_resp {
+ __u32 events_reported;
+};
+
+/*
+ * The ib_uverbs_sge structure isn't used anywhere, since we assume
+ * the ib_sge structure is packed the same way on 32-bit and 64-bit
+ * architectures in both kernel and user space. It's just here to
+ * document the ABI.
+ */
+struct ib_uverbs_sge {
+ __aligned_u64 addr;
+ __u32 length;
+ __u32 lkey;
+};
+
+enum ib_uverbs_wr_opcode {
+ IB_UVERBS_WR_RDMA_WRITE = 0,
+ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
+ IB_UVERBS_WR_SEND = 2,
+ IB_UVERBS_WR_SEND_WITH_IMM = 3,
+ IB_UVERBS_WR_RDMA_READ = 4,
+ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
+ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
+ IB_UVERBS_WR_LOCAL_INV = 7,
+ IB_UVERBS_WR_BIND_MW = 8,
+ IB_UVERBS_WR_SEND_WITH_INV = 9,
+ IB_UVERBS_WR_TSO = 10,
+ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
+ /* Review enum ib_wr_opcode before modifying this */
+};
+
+struct ib_uverbs_send_wr {
+ __aligned_u64 wr_id;
+ __u32 num_sge;
+ __u32 opcode; /* see enum ib_uverbs_wr_opcode */
+ __u32 send_flags;
+ union {
+ __be32 imm_data;
+ __u32 invalidate_rkey;
+ } ex;
+ union {
+ struct {
+ __aligned_u64 remote_addr;
+ __u32 rkey;
+ __u32 reserved;
+ } rdma;
+ struct {
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
+ __u32 rkey;
+ __u32 reserved;
+ } atomic;
+ struct {
+ __u32 ah;
+ __u32 remote_qpn;
+ __u32 remote_qkey;
+ __u32 reserved;
+ } ud;
+ } wr;
+};
+
+struct ib_uverbs_post_send {
+ __aligned_u64 response;
+ __u32 qp_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct ib_uverbs_send_wr send_wr[0];
+};
+
+struct ib_uverbs_post_send_resp {
+ __u32 bad_wr;
+};
+
+struct ib_uverbs_recv_wr {
+ __aligned_u64 wr_id;
+ __u32 num_sge;
+ __u32 reserved;
+};
+
+struct ib_uverbs_post_recv {
+ __aligned_u64 response;
+ __u32 qp_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct ib_uverbs_recv_wr recv_wr[0];
+};
+
+struct ib_uverbs_post_recv_resp {
+ __u32 bad_wr;
+};
+
+struct ib_uverbs_post_srq_recv {
+ __aligned_u64 response;
+ __u32 srq_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct ib_uverbs_recv_wr recv[0];
+};
+
+struct ib_uverbs_post_srq_recv_resp {
+ __u32 bad_wr;
+};
+
+struct ib_uverbs_create_ah {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 reserved;
+ struct ib_uverbs_ah_attr attr;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_create_ah_resp {
+ __u32 ah_handle;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_destroy_ah {
+ __u32 ah_handle;
+};
+
+struct ib_uverbs_attach_mcast {
+ __u8 gid[16];
+ __u32 qp_handle;
+ __u16 mlid;
+ __u16 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_detach_mcast {
+ __u8 gid[16];
+ __u32 qp_handle;
+ __u16 mlid;
+ __u16 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_flow_spec_hdr {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ /* followed by flow_spec */
+ __aligned_u64 flow_spec_data[0];
+};
+
+struct ib_uverbs_flow_eth_filter {
+ __u8 dst_mac[6];
+ __u8 src_mac[6];
+ __be16 ether_type;
+ __be16 vlan_tag;
+};
+
+struct ib_uverbs_flow_spec_eth {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_eth_filter val;
+ struct ib_uverbs_flow_eth_filter mask;
+};
+
+struct ib_uverbs_flow_ipv4_filter {
+ __be32 src_ip;
+ __be32 dst_ip;
+ __u8 proto;
+ __u8 tos;
+ __u8 ttl;
+ __u8 flags;
+};
+
+struct ib_uverbs_flow_spec_ipv4 {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_ipv4_filter val;
+ struct ib_uverbs_flow_ipv4_filter mask;
+};
+
+struct ib_uverbs_flow_tcp_udp_filter {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ib_uverbs_flow_spec_tcp_udp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_tcp_udp_filter val;
+ struct ib_uverbs_flow_tcp_udp_filter mask;
+};
+
+struct ib_uverbs_flow_ipv6_filter {
+ __u8 src_ip[16];
+ __u8 dst_ip[16];
+ __be32 flow_label;
+ __u8 next_hdr;
+ __u8 traffic_class;
+ __u8 hop_limit;
+ __u8 reserved;
+};
+
+struct ib_uverbs_flow_spec_ipv6 {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_ipv6_filter val;
+ struct ib_uverbs_flow_ipv6_filter mask;
+};
+
+struct ib_uverbs_flow_spec_action_tag {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 tag_id;
+ __u32 reserved1;
+};
+
+struct ib_uverbs_flow_spec_action_drop {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+};
+
+struct ib_uverbs_flow_spec_action_handle {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
+struct ib_uverbs_flow_spec_action_count {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
+struct ib_uverbs_flow_tunnel_filter {
+ __be32 tunnel_id;
+};
+
+struct ib_uverbs_flow_spec_tunnel {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_tunnel_filter val;
+ struct ib_uverbs_flow_tunnel_filter mask;
+};
+
+struct ib_uverbs_flow_spec_esp_filter {
+ __u32 spi;
+ __u32 seq;
+};
+
+struct ib_uverbs_flow_spec_esp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_esp_filter val;
+ struct ib_uverbs_flow_spec_esp_filter mask;
+};
+
+struct ib_uverbs_flow_gre_filter {
+ /* c_ks_res0_ver field is bits 0-15 in offset 0 of a standard GRE header:
+ * bit 0 - C - checksum bit.
+ * bit 1 - reserved. set to 0.
+ * bit 2 - key bit.
+ * bit 3 - sequence number bit.
+ * bits 4:12 - reserved. set to 0.
+ * bits 13:15 - GRE version.
+ */
+ __be16 c_ks_res0_ver;
+ __be16 protocol;
+ __be32 key;
+};
+
+struct ib_uverbs_flow_spec_gre {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_gre_filter val;
+ struct ib_uverbs_flow_gre_filter mask;
+};
+
+struct ib_uverbs_flow_mpls_filter {
+ /* The field includes the entire MPLS label:
+ * bits 0:19 - label field.
+ * bits 20:22 - traffic class field.
+ * bits 23 - bottom of stack bit.
+ * bits 24:31 - ttl field.
+ */
+ __be32 label;
+};
+
+struct ib_uverbs_flow_spec_mpls {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_mpls_filter val;
+ struct ib_uverbs_flow_mpls_filter mask;
+};
+
+struct ib_uverbs_flow_attr {
+ __u32 type;
+ __u16 size;
+ __u16 priority;
+ __u8 num_of_specs;
+ __u8 reserved[2];
+ __u8 port;
+ __u32 flags;
+ /* Following are the optional layers according to user request
+ * struct ib_flow_spec_xxx
+ * struct ib_flow_spec_yyy
+ */
+ struct ib_uverbs_flow_spec_hdr flow_specs[0];
+};
+
+struct ib_uverbs_create_flow {
+ __u32 comp_mask;
+ __u32 qp_handle;
+ struct ib_uverbs_flow_attr flow_attr;
+};
+
+struct ib_uverbs_create_flow_resp {
+ __u32 comp_mask;
+ __u32 flow_handle;
+};
+
+struct ib_uverbs_destroy_flow {
+ __u32 comp_mask;
+ __u32 flow_handle;
+};
+
+struct ib_uverbs_create_srq {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_create_xsrq {
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
+ __u32 srq_type;
+ __u32 pd_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 max_num_tags;
+ __u32 xrcd_handle;
+ __u32 cq_handle;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_create_srq_resp {
+ __u32 srq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srqn;
+ __u32 driver_data[0];
+};
+
+struct ib_uverbs_modify_srq {
+ __u32 srq_handle;
+ __u32 attr_mask;
+ __u32 max_wr;
+ __u32 srq_limit;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_srq {
+ __aligned_u64 response;
+ __u32 srq_handle;
+ __u32 reserved;
+ __aligned_u64 driver_data[0];
+};
+
+struct ib_uverbs_query_srq_resp {
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+};
+
+struct ib_uverbs_destroy_srq {
+ __aligned_u64 response;
+ __u32 srq_handle;
+ __u32 reserved;
+};
+
+struct ib_uverbs_destroy_srq_resp {
+ __u32 events_reported;
+};
+
+struct ib_uverbs_ex_create_wq {
+ __u32 comp_mask;
+ __u32 wq_type;
+ __aligned_u64 user_handle;
+ __u32 pd_handle;
+ __u32 cq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 create_flags; /* Use enum ib_wq_flags */
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_create_wq_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 wq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 wqn;
+};
+
+struct ib_uverbs_ex_destroy_wq {
+ __u32 comp_mask;
+ __u32 wq_handle;
+};
+
+struct ib_uverbs_ex_destroy_wq_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 events_reported;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_modify_wq {
+ __u32 attr_mask;
+ __u32 wq_handle;
+ __u32 wq_state;
+ __u32 curr_wq_state;
+ __u32 flags; /* Use enum ib_wq_flags */
+ __u32 flags_mask; /* Use enum ib_wq_flags */
+};
+
+/* This limit prevents excessive memory allocation; it is not a max expected size */
+#define IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE 0x0d
+struct ib_uverbs_ex_create_rwq_ind_table {
+ __u32 comp_mask;
+ __u32 log_ind_tbl_size;
+ /* Following are the wq handles according to log_ind_tbl_size
+ * wq_handle1
+ * wq_handle2
+ */
+ __u32 wq_handles[0];
+};
+
+struct ib_uverbs_ex_create_rwq_ind_table_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 ind_tbl_handle;
+ __u32 ind_tbl_num;
+};
+
+struct ib_uverbs_ex_destroy_rwq_ind_table {
+ __u32 comp_mask;
+ __u32 ind_tbl_handle;
+};
+
+struct ib_uverbs_cq_moderation {
+ __u16 cq_count;
+ __u16 cq_period;
+};
+
+struct ib_uverbs_ex_modify_cq {
+ __u32 cq_handle;
+ __u32 attr_mask;
+ struct ib_uverbs_cq_moderation attr;
+ __u32 reserved;
+};
+
+#define IB_DEVICE_NAME_MAX 64
+
+#endif /* IB_USER_VERBS_H */
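The comment block near the top of this header fixes the write() ABI convention: a __u32 command code, then __u16 in_words/out_words giving the sizes of the command block and the response buffer in 32-bit words, with pointers always carried in __u64 fields so that 32-bit and 64-bit processes produce identical layouts. As a minimal sketch of that convention, and not part of the patch, the following assembles a DEALLOC_PD command block; the open uverbs device descriptor cmd_fd is an assumption:

#include <unistd.h>
#include "ib_user_verbs.h"

static int sketch_dealloc_pd(int cmd_fd, __u32 pd_handle)
{
	/* Plain commands are the header immediately followed by the payload. */
	struct {
		struct ib_uverbs_cmd_hdr hdr;
		struct ib_uverbs_dealloc_pd cmd;
	} req;

	req.hdr.command   = IB_USER_VERBS_CMD_DEALLOC_PD;
	req.hdr.in_words  = sizeof(req) / 4; /* whole block, in 32-bit words */
	req.hdr.out_words = 0;               /* DEALLOC_PD defines no response */
	req.cmd.pd_handle = pd_handle;

	/* The kernel executes the verb only if the whole block is consumed. */
	return write(cmd_fd, &req, sizeof(req)) == (ssize_t)sizeof(req) ? 0 : -1;
}

DEALLOC_PD is convenient for the sketch because its payload is a single __u32 and it has no response struct; commands that do return data instead begin their payload with an __aligned_u64 response field holding the user buffer pointer.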
diff --git a/src/rc-compat/v37/kern-abi.h b/src/rc-compat/v37/kern-abi.h
new file mode 100644
index 000000000000..570b05fec462
--- /dev/null
+++ b/src/rc-compat/v37/kern-abi.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef KERN_ABI_H
+#define KERN_ABI_H
+
+#include <linux/types.h>
+#include <assert.h>
+#include <ccan/container_of.h>
+
+#include <rdma/ib_user_verbs.h>
+#include "kernel-abi_ib_user_verbs.h"
+
+/*
+ * The minimum and maximum kernel ABI that we can handle.
+ */
+#define IB_USER_VERBS_MIN_ABI_VERSION 3
+#define IB_USER_VERBS_MAX_ABI_VERSION 6
+
+struct ex_hdr {
+ struct ib_uverbs_cmd_hdr hdr;
+ struct ib_uverbs_ex_cmd_hdr ex_hdr;
+};
+
+/*
+ * These macros expand to type names that refer to the ABI structure type
+ * associated with the given enum string.
+ */
+#define IBV_ABI_REQ(_enum) _ABI_REQ_STRUCT_##_enum
+#define IBV_KABI_REQ(_enum) _KABI_REQ_STRUCT_##_enum
+#define IBV_KABI_RESP(_enum) _KABI_RESP_STRUCT_##_enum
+
+#define IBV_ABI_ALIGN(_enum) _ABI_ALIGN_##_enum
+
+/*
+ * Historically the code had copied the data in the kernel headers, modified
+ * it and placed it in structs. To avoid recoding everything we continue to
+ * preserve the same struct layout, with the kernel struct 'loose' inside the
+ * modified userspace struct.
+ *
+ * This is automated with the make_abi_structs.py script which produces the
+ * _STRUCT_xx macro that produces a tagless version of the kernel struct. The
+ * tagless struct produces a layout that matches the original code.
+ */
+#define DECLARE_CMDX(_enum, _name, _kabi, _kabi_resp) \
+ struct _name { \
+ struct ib_uverbs_cmd_hdr hdr; \
+ union { \
+ _STRUCT_##_kabi; \
+ struct _kabi core_payload; \
+ }; \
+ }; \
+ typedef struct _name IBV_ABI_REQ(_enum); \
+ typedef struct _kabi IBV_KABI_REQ(_enum); \
+ typedef struct _kabi_resp IBV_KABI_RESP(_enum); \
+ enum { IBV_ABI_ALIGN(_enum) = 4 }; \
+ static_assert(sizeof(struct _kabi_resp) % 4 == 0, \
+ "Bad resp alignment"); \
+ static_assert(_enum != -1, "Bad enum"); \
+ static_assert(sizeof(struct _name) == \
+ sizeof(struct ib_uverbs_cmd_hdr) + \
+ sizeof(struct _kabi), \
+ "Bad size")
+
+#define DECLARE_CMD(_enum, _name, _kabi) \
+ DECLARE_CMDX(_enum, _name, _kabi, _kabi##_resp)
+
+#define DECLARE_CMD_EXX(_enum, _name, _kabi, _kabi_resp) \
+ struct _name { \
+ struct ex_hdr hdr; \
+ union { \
+ _STRUCT_##_kabi; \
+ struct _kabi core_payload; \
+ }; \
+ }; \
+ typedef struct _name IBV_ABI_REQ(_enum); \
+ typedef struct _kabi IBV_KABI_REQ(_enum); \
+ typedef struct _kabi_resp IBV_KABI_RESP(_enum); \
+ enum { IBV_ABI_ALIGN(_enum) = 8 }; \
+ static_assert(_enum != -1, "Bad enum"); \
+ static_assert(sizeof(struct _kabi) % 8 == 0, "Bad req alignment"); \
+ static_assert(sizeof(struct _kabi_resp) % 8 == 0, \
+ "Bad resp alignment"); \
+ static_assert(sizeof(struct _name) == \
+ sizeof(struct ex_hdr) + sizeof(struct _kabi), \
+ "Bad size"); \
+ static_assert(sizeof(struct _name) % 8 == 0, "Bad alignment")
+#define DECLARE_CMD_EX(_enum, _name, _kabi) \
+ DECLARE_CMD_EXX(_enum, _name, _kabi, _kabi##_resp)
+
+/* Drivers may use 'empty' for _kabi to signal no struct */
+struct empty {};
+#define _STRUCT_empty struct {}
+
+/*
+ * Define the ABI struct for use by the driver. The internal cmd APIs require
+ * this layout. The driver specifies the enum # they wish to define for and
+ * the base name, and the macros figure out the rest correctly.
+ *
+ * The static asserts check that the layout produced by the wrapper struct has
+ * no implicit padding in strange places, specifically between the core
+ * structure and the driver structure and between the driver structure and the
+ * end of the struct.
+ *
+ * Implicit padding can arise in various cases where the structs are not sized
+ * to a multiple of 8 bytes.
+ */
+#define DECLARE_DRV_CMD(_name, _enum, _kabi_req, _kabi_resp) \
+ struct _name { \
+ IBV_ABI_REQ(_enum) ibv_cmd; \
+ union { \
+ _STRUCT_##_kabi_req; \
+ struct _kabi_req drv_payload; \
+ }; \
+ }; \
+ struct _name##_resp { \
+ IBV_KABI_RESP(_enum) ibv_resp; \
+ union { \
+ _STRUCT_##_kabi_resp; \
+ struct _kabi_resp drv_payload; \
+ }; \
+ }; \
+ static_assert(sizeof(IBV_KABI_REQ(_enum)) % \
+ __alignof__(struct _kabi_req) == \
+ 0, \
+ "Bad kabi req struct length"); \
+ static_assert(sizeof(struct _name) == \
+ sizeof(IBV_ABI_REQ(_enum)) + \
+ sizeof(struct _kabi_req), \
+ "Bad req size"); \
+ static_assert(sizeof(struct _name) % IBV_ABI_ALIGN(_enum) == 0, \
+ "Bad kabi req alignment"); \
+ static_assert(sizeof(IBV_KABI_RESP(_enum)) % \
+ __alignof__(struct _kabi_resp) == \
+ 0, \
+ "Bad kabi resp struct length"); \
+ static_assert(sizeof(struct _name##_resp) == \
+ sizeof(IBV_KABI_RESP(_enum)) + \
+ sizeof(struct _kabi_resp), \
+ "Bad resp size"); \
+ static_assert(sizeof(struct _name##_resp) % IBV_ABI_ALIGN(_enum) == 0, \
+ "Bad kabi resp alignment");
+
+DECLARE_CMD(IB_USER_VERBS_CMD_ALLOC_MW, ibv_alloc_mw, ib_uverbs_alloc_mw);
+DECLARE_CMD(IB_USER_VERBS_CMD_ALLOC_PD, ibv_alloc_pd, ib_uverbs_alloc_pd);
+DECLARE_CMDX(IB_USER_VERBS_CMD_ATTACH_MCAST, ibv_attach_mcast, ib_uverbs_attach_mcast, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_CLOSE_XRCD, ibv_close_xrcd, ib_uverbs_close_xrcd, empty);
+DECLARE_CMD(IB_USER_VERBS_CMD_CREATE_AH, ibv_create_ah, ib_uverbs_create_ah);
+DECLARE_CMD(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL, ibv_create_comp_channel, ib_uverbs_create_comp_channel);
+DECLARE_CMD(IB_USER_VERBS_CMD_CREATE_CQ, ibv_create_cq, ib_uverbs_create_cq);
+DECLARE_CMD(IB_USER_VERBS_CMD_CREATE_QP, ibv_create_qp, ib_uverbs_create_qp);
+DECLARE_CMD(IB_USER_VERBS_CMD_CREATE_SRQ, ibv_create_srq, ib_uverbs_create_srq);
+DECLARE_CMDX(IB_USER_VERBS_CMD_CREATE_XSRQ, ibv_create_xsrq, ib_uverbs_create_xsrq, ib_uverbs_create_srq_resp);
+DECLARE_CMDX(IB_USER_VERBS_CMD_DEALLOC_MW, ibv_dealloc_mw, ib_uverbs_dealloc_mw, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_DEALLOC_PD, ibv_dealloc_pd, ib_uverbs_dealloc_pd, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_DEREG_MR, ibv_dereg_mr, ib_uverbs_dereg_mr, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_DESTROY_AH, ibv_destroy_ah, ib_uverbs_destroy_ah, empty);
+DECLARE_CMD(IB_USER_VERBS_CMD_DESTROY_CQ, ibv_destroy_cq, ib_uverbs_destroy_cq);
+DECLARE_CMD(IB_USER_VERBS_CMD_DESTROY_QP, ibv_destroy_qp, ib_uverbs_destroy_qp);
+DECLARE_CMD(IB_USER_VERBS_CMD_DESTROY_SRQ, ibv_destroy_srq, ib_uverbs_destroy_srq);
+DECLARE_CMDX(IB_USER_VERBS_CMD_DETACH_MCAST, ibv_detach_mcast, ib_uverbs_detach_mcast, empty);
+DECLARE_CMD(IB_USER_VERBS_CMD_GET_CONTEXT, ibv_get_context, ib_uverbs_get_context);
+DECLARE_CMDX(IB_USER_VERBS_CMD_MODIFY_QP, ibv_modify_qp, ib_uverbs_modify_qp, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_MODIFY_SRQ, ibv_modify_srq, ib_uverbs_modify_srq, empty);
+DECLARE_CMDX(IB_USER_VERBS_CMD_OPEN_QP, ibv_open_qp, ib_uverbs_open_qp, ib_uverbs_create_qp_resp);
+DECLARE_CMD(IB_USER_VERBS_CMD_OPEN_XRCD, ibv_open_xrcd, ib_uverbs_open_xrcd);
+DECLARE_CMD(IB_USER_VERBS_CMD_POLL_CQ, ibv_poll_cq, ib_uverbs_poll_cq);
+DECLARE_CMD(IB_USER_VERBS_CMD_POST_RECV, ibv_post_recv, ib_uverbs_post_recv);
+DECLARE_CMD(IB_USER_VERBS_CMD_POST_SEND, ibv_post_send, ib_uverbs_post_send);
+DECLARE_CMD(IB_USER_VERBS_CMD_POST_SRQ_RECV, ibv_post_srq_recv, ib_uverbs_post_srq_recv);
+DECLARE_CMD(IB_USER_VERBS_CMD_QUERY_DEVICE, ibv_query_device, ib_uverbs_query_device);
+DECLARE_CMD(IB_USER_VERBS_CMD_QUERY_PORT, ibv_query_port, ib_uverbs_query_port);
+DECLARE_CMD(IB_USER_VERBS_CMD_QUERY_QP, ibv_query_qp, ib_uverbs_query_qp);
+DECLARE_CMD(IB_USER_VERBS_CMD_QUERY_SRQ, ibv_query_srq, ib_uverbs_query_srq);
+DECLARE_CMD(IB_USER_VERBS_CMD_REG_MR, ibv_reg_mr, ib_uverbs_reg_mr);
+DECLARE_CMDX(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, ibv_req_notify_cq, ib_uverbs_req_notify_cq, empty);
+DECLARE_CMD(IB_USER_VERBS_CMD_REREG_MR, ibv_rereg_mr, ib_uverbs_rereg_mr);
+DECLARE_CMD(IB_USER_VERBS_CMD_RESIZE_CQ, ibv_resize_cq, ib_uverbs_resize_cq);
+
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_CREATE_CQ, ibv_create_cq_ex, ib_uverbs_ex_create_cq);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_CREATE_FLOW, ibv_create_flow, ib_uverbs_create_flow);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_CREATE_QP, ibv_create_qp_ex, ib_uverbs_ex_create_qp);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL, ibv_create_rwq_ind_table, ib_uverbs_ex_create_rwq_ind_table);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_CREATE_WQ, ibv_create_wq, ib_uverbs_ex_create_wq);
+DECLARE_CMD_EXX(IB_USER_VERBS_EX_CMD_DESTROY_FLOW, ibv_destroy_flow, ib_uverbs_destroy_flow, empty);
+DECLARE_CMD_EXX(IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL, ibv_destroy_rwq_ind_table, ib_uverbs_ex_destroy_rwq_ind_table, empty);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_DESTROY_WQ, ibv_destroy_wq, ib_uverbs_ex_destroy_wq);
+DECLARE_CMD_EXX(IB_USER_VERBS_EX_CMD_MODIFY_CQ, ibv_modify_cq, ib_uverbs_ex_modify_cq, empty);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_MODIFY_QP, ibv_modify_qp_ex, ib_uverbs_ex_modify_qp);
+DECLARE_CMD_EXX(IB_USER_VERBS_EX_CMD_MODIFY_WQ, ibv_modify_wq, ib_uverbs_ex_modify_wq, empty);
+DECLARE_CMD_EX(IB_USER_VERBS_EX_CMD_QUERY_DEVICE, ibv_query_device_ex, ib_uverbs_ex_query_device);
+
+/*
+ * Both ib_uverbs_create_qp and ib_uverbs_ex_create_qp start with the same
+ * structure; this function converts the ex version into the normal version.
+ */
+static inline struct ib_uverbs_create_qp *
+ibv_create_qp_ex_to_reg(struct ibv_create_qp_ex *cmd_ex)
+{
+ /*
+	 * user_handle is at the start in both layouts; note that the ex
+	 * version does not have response located in the same place, so
+	 * response cannot be touched.
+ */
+ return container_of(&cmd_ex->user_handle, struct ib_uverbs_create_qp,
+ user_handle);
+}
+
+/*
+ * This file contains copied data from the kernel's include/uapi/rdma/ib_user_verbs.h,
+ * now included above.
+ *
+ * Whenever possible use the definition from the kernel header and avoid
+ * copying from that header into this file.
+ */
+
+struct ibv_kern_ipv4_filter {
+ __u32 src_ip;
+ __u32 dst_ip;
+};
+
+struct ibv_kern_spec_ipv4 {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ struct ibv_kern_ipv4_filter val;
+ struct ibv_kern_ipv4_filter mask;
+};
+
+struct ibv_kern_spec {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct ib_uverbs_flow_spec_eth eth;
+ struct ibv_kern_spec_ipv4 ipv4;
+ struct ib_uverbs_flow_spec_ipv4 ipv4_ext;
+ struct ib_uverbs_flow_spec_esp esp;
+ struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+ struct ib_uverbs_flow_spec_ipv6 ipv6;
+ struct ib_uverbs_flow_spec_gre gre;
+ struct ib_uverbs_flow_spec_tunnel tunnel;
+ struct ib_uverbs_flow_spec_mpls mpls;
+ struct ib_uverbs_flow_spec_action_tag flow_tag;
+ struct ib_uverbs_flow_spec_action_drop drop;
+ struct ib_uverbs_flow_spec_action_handle handle;
+ struct ib_uverbs_flow_spec_action_count flow_count;
+ };
+};
+
+struct ib_uverbs_modify_srq_v3 {
+ __u32 srq_handle;
+ __u32 attr_mask;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+};
+#define _STRUCT_ib_uverbs_modify_srq_v3
+enum { IB_USER_VERBS_CMD_MODIFY_SRQ_V3 = IB_USER_VERBS_CMD_MODIFY_SRQ };
+DECLARE_CMDX(IB_USER_VERBS_CMD_MODIFY_SRQ_V3, ibv_modify_srq_v3, ib_uverbs_modify_srq_v3, empty);
+
+struct ibv_create_qp_resp_v3 {
+ __u32 qp_handle;
+ __u32 qpn;
+};
+
+struct ibv_create_qp_resp_v4 {
+ __u32 qp_handle;
+ __u32 qpn;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+};
+
+struct ibv_create_srq_resp_v5 {
+ __u32 srq_handle;
+};
+
+#define _STRUCT_ib_uverbs_create_srq_v5
+enum { IB_USER_VERBS_CMD_CREATE_SRQ_V5 = IB_USER_VERBS_CMD_CREATE_SRQ };
+DECLARE_CMDX(IB_USER_VERBS_CMD_CREATE_SRQ_V5, ibv_create_srq_v5, ib_uverbs_create_srq, ibv_create_srq_resp_v5);
+
+#define _STRUCT_ib_uverbs_create_qp_v4
+enum { IB_USER_VERBS_CMD_CREATE_QP_V4 = IB_USER_VERBS_CMD_CREATE_QP };
+DECLARE_CMDX(IB_USER_VERBS_CMD_CREATE_QP_V4, ibv_create_qp_v4, ib_uverbs_create_qp, ibv_create_qp_resp_v4);
+
+#define _STRUCT_ib_uverbs_create_qp_v3
+enum { IB_USER_VERBS_CMD_CREATE_QP_V3 = IB_USER_VERBS_CMD_CREATE_QP };
+DECLARE_CMDX(IB_USER_VERBS_CMD_CREATE_QP_V3, ibv_create_qp_v3, ib_uverbs_create_qp, ibv_create_qp_resp_v3);
+#endif /* KERN_ABI_H */
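Extended commands travel over the same write() path but prefix the payload with the struct ex_hdr defined above: IB_USER_VERBS_CMD_FLAG_EXTENDED is OR-ed into the command code, the response buffer pointer moves into ex_hdr.response, and the word counts switch to 8-byte units that exclude the headers themselves. The sketch below mirrors, as far as we can tell, how libibverbs' write fallback fills these fields; treat the unit conventions as an assumption rather than a specification:

#include <stddef.h>
#include <stdint.h>
#include "kern-abi.h"

/* Sketch only: fill 'struct ex_hdr' for an extended write() command.
 * req_size/resp_size are the full request/response sizes; the core_*
 * sizes cover just the kernel-ABI part, so the differences are the
 * provider (driver-private) portions. */
static void sketch_fill_ex_hdr(struct ex_hdr *hdr, __u32 write_method,
			       size_t req_size, size_t core_req_size,
			       uintptr_t resp, size_t resp_size,
			       size_t core_resp_size)
{
	hdr->hdr.command   = IB_USER_VERBS_CMD_FLAG_EXTENDED | write_method;
	hdr->hdr.in_words  = (req_size - sizeof(struct ex_hdr)) / 8;
	hdr->hdr.out_words = resp_size / 8;
	hdr->ex_hdr.response = resp; /* response buffer pointer, carried as __u64 */
	hdr->ex_hdr.provider_in_words  = (req_size - core_req_size) / 8;
	hdr->ex_hdr.provider_out_words = (resp_size - core_resp_size) / 8;
	hdr->ex_hdr.cmd_hdr_reserved = 0;
}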
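DECLARE_DRV_CMD is the piece that provider libraries such as libbnxt_re actually consume: it splices a driver-private payload directly after the core ABI struct and static-asserts that no implicit padding sneaks in between the two. A sketch, with a hypothetical driver response (real providers get their _STRUCT_ macro generated by make_abi_structs.py rather than writing it by hand):

#include "kern-abi.h"

/* Hypothetical driver-private response; not a real kernel ABI. */
struct example_pd_resp {
	__u32 pdid;
	__u32 reserved;
};

/* Hand-written tagless twin of the struct above; normally this macro is
 * generated by make_abi_structs.py. */
#define _STRUCT_example_pd_resp struct { \
	__u32 pdid; \
	__u32 reserved; \
}

DECLARE_DRV_CMD(example_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
		empty, example_pd_resp);

/* This yields 'struct example_alloc_pd' (the core ALLOC_PD request with an
 * empty driver payload) and 'struct example_alloc_pd_resp' (the core
 * ib_uverbs_alloc_pd_resp immediately followed by the driver response),
 * with static asserts rejecting any implicit padding between the parts. */

The tagless-twin trick is what lets the same bytes be read either through the anonymous struct members or through the drv_payload field, preserving the layout the pre-macro code relied on.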
diff --git a/src/rc-compat/v37/kernel-abi_ib_user_verbs.h b/src/rc-compat/v37/kernel-abi_ib_user_verbs.h
new file mode 100644
index 000000000000..fbe4ae635b84
--- /dev/null
+++ b/src/rc-compat/v37/kernel-abi_ib_user_verbs.h
@@ -0,0 +1,1114 @@
+#define _STRUCT_ib_uverbs_async_event_desc struct { \
+__aligned_u64 element; \
+__u32 event_type; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_comp_event_desc struct { \
+__aligned_u64 cq_handle; \
+}
+
+#define _STRUCT_ib_uverbs_cq_moderation_caps struct { \
+__u16 max_cq_moderation_count; \
+__u16 max_cq_moderation_period; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_cmd_hdr struct { \
+__u32 command; \
+__u16 in_words; \
+__u16 out_words; \
+}
+
+#define _STRUCT_ib_uverbs_ex_cmd_hdr struct { \
+__aligned_u64 response; \
+__u16 provider_in_words; \
+__u16 provider_out_words; \
+__u32 cmd_hdr_reserved; \
+}
+
+#define _STRUCT_ib_uverbs_get_context struct { \
+__aligned_u64 response; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_get_context_resp struct { \
+__u32 async_fd; \
+__u32 num_comp_vectors; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_device struct { \
+__aligned_u64 response; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_device_resp struct { \
+__aligned_u64 fw_ver; \
+__be64 node_guid; \
+__be64 sys_image_guid; \
+__aligned_u64 max_mr_size; \
+__aligned_u64 page_size_cap; \
+__u32 vendor_id; \
+__u32 vendor_part_id; \
+__u32 hw_ver; \
+__u32 max_qp; \
+__u32 max_qp_wr; \
+__u32 device_cap_flags; \
+__u32 max_sge; \
+__u32 max_sge_rd; \
+__u32 max_cq; \
+__u32 max_cqe; \
+__u32 max_mr; \
+__u32 max_pd; \
+__u32 max_qp_rd_atom; \
+__u32 max_ee_rd_atom; \
+__u32 max_res_rd_atom; \
+__u32 max_qp_init_rd_atom; \
+__u32 max_ee_init_rd_atom; \
+__u32 atomic_cap; \
+__u32 max_ee; \
+__u32 max_rdd; \
+__u32 max_mw; \
+__u32 max_raw_ipv6_qp; \
+__u32 max_raw_ethy_qp; \
+__u32 max_mcast_grp; \
+__u32 max_mcast_qp_attach; \
+__u32 max_total_mcast_qp_attach; \
+__u32 max_ah; \
+__u32 max_fmr; \
+__u32 max_map_per_fmr; \
+__u32 max_srq; \
+__u32 max_srq_wr; \
+__u32 max_srq_sge; \
+__u16 max_pkeys; \
+__u8 local_ca_ack_delay; \
+__u8 phys_port_cnt; \
+__u8 reserved[4]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_query_device struct { \
+__u32 comp_mask; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_odp_caps struct { \
+__aligned_u64 general_caps; \
+struct { \
+__u32 rc_odp_caps; \
+__u32 uc_odp_caps; \
+__u32 ud_odp_caps; \
+} per_transport_caps; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_rss_caps struct { \
+/* Corresponding bit will be set if qp type from \
+* 'enum ib_qp_type' is supported, e.g. \
+* supported_qpts |= 1 << IB_QPT_UD \
+*/ \
+__u32 supported_qpts; \
+__u32 max_rwq_indirection_tables; \
+__u32 max_rwq_indirection_table_size; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_tm_caps struct { \
+ \
+__u32 max_rndv_hdr_size; \
+ \
+__u32 max_num_tags; \
+ \
+__u32 flags; \
+ \
+__u32 max_ops; \
+ \
+__u32 max_sge; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_ex_query_device_resp struct { \
+struct ib_uverbs_query_device_resp base; \
+__u32 comp_mask; \
+__u32 response_length; \
+struct ib_uverbs_odp_caps odp_caps; \
+__aligned_u64 timestamp_mask; \
+__aligned_u64 hca_core_clock; \
+__aligned_u64 device_cap_flags_ex; \
+struct ib_uverbs_rss_caps rss_caps; \
+__u32 max_wq_type_rq; \
+__u32 raw_packet_caps; \
+struct ib_uverbs_tm_caps tm_caps; \
+struct ib_uverbs_cq_moderation_caps cq_moderation_caps; \
+__aligned_u64 max_dm_size; \
+__u32 xrc_odp_caps; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_query_port struct { \
+__aligned_u64 response; \
+__u8 port_num; \
+__u8 reserved[7]; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_port_resp struct { \
+__u32 port_cap_flags; \
+__u32 max_msg_sz; \
+__u32 bad_pkey_cntr; \
+__u32 qkey_viol_cntr; \
+__u32 gid_tbl_len; \
+__u16 pkey_tbl_len; \
+__u16 lid; \
+__u16 sm_lid; \
+__u8 state; \
+__u8 max_mtu; \
+__u8 active_mtu; \
+__u8 lmc; \
+__u8 max_vl_num; \
+__u8 sm_sl; \
+__u8 subnet_timeout; \
+__u8 init_type_reply; \
+__u8 active_width; \
+__u8 active_speed; \
+__u8 phys_state; \
+__u8 link_layer; \
+__u8 flags; \
+__u8 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_alloc_pd struct { \
+__aligned_u64 response; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_alloc_pd_resp struct { \
+__u32 pd_handle; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_dealloc_pd struct { \
+__u32 pd_handle; \
+}
+
+#define _STRUCT_ib_uverbs_open_xrcd struct { \
+__aligned_u64 response; \
+__u32 fd; \
+__u32 oflags; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_open_xrcd_resp struct { \
+__u32 xrcd_handle; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_close_xrcd struct { \
+__u32 xrcd_handle; \
+}
+
+#define _STRUCT_ib_uverbs_reg_mr struct { \
+__aligned_u64 response; \
+__aligned_u64 start; \
+__aligned_u64 length; \
+__aligned_u64 hca_va; \
+__u32 pd_handle; \
+__u32 access_flags; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_reg_mr_resp struct { \
+__u32 mr_handle; \
+__u32 lkey; \
+__u32 rkey; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_rereg_mr struct { \
+__aligned_u64 response; \
+__u32 mr_handle; \
+__u32 flags; \
+__aligned_u64 start; \
+__aligned_u64 length; \
+__aligned_u64 hca_va; \
+__u32 pd_handle; \
+__u32 access_flags; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_rereg_mr_resp struct { \
+__u32 lkey; \
+__u32 rkey; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_dereg_mr struct { \
+__u32 mr_handle; \
+}
+
+#define _STRUCT_ib_uverbs_alloc_mw struct { \
+__aligned_u64 response; \
+__u32 pd_handle; \
+__u8 mw_type; \
+__u8 reserved[3]; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_alloc_mw_resp struct { \
+__u32 mw_handle; \
+__u32 rkey; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_dealloc_mw struct { \
+__u32 mw_handle; \
+}
+
+#define _STRUCT_ib_uverbs_create_comp_channel struct { \
+__aligned_u64 response; \
+}
+
+#define _STRUCT_ib_uverbs_create_comp_channel_resp struct { \
+__u32 fd; \
+}
+
+#define _STRUCT_ib_uverbs_create_cq struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 cqe; \
+__u32 comp_vector; \
+__s32 comp_channel; \
+__u32 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_cq struct { \
+__aligned_u64 user_handle; \
+__u32 cqe; \
+__u32 comp_vector; \
+__s32 comp_channel; \
+__u32 comp_mask; \
+__u32 flags; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_create_cq_resp struct { \
+__u32 cq_handle; \
+__u32 cqe; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_cq_resp struct { \
+struct ib_uverbs_create_cq_resp base; \
+__u32 comp_mask; \
+__u32 response_length; \
+}
+
+#define _STRUCT_ib_uverbs_resize_cq struct { \
+__aligned_u64 response; \
+__u32 cq_handle; \
+__u32 cqe; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_resize_cq_resp struct { \
+__u32 cqe; \
+__u32 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_poll_cq struct { \
+__aligned_u64 response; \
+__u32 cq_handle; \
+__u32 ne; \
+}
+
+#define _STRUCT_ib_uverbs_wc struct { \
+__aligned_u64 wr_id; \
+__u32 status; \
+__u32 opcode; \
+__u32 vendor_err; \
+__u32 byte_len; \
+union { \
+__be32 imm_data; \
+__u32 invalidate_rkey; \
+} ex; \
+__u32 qp_num; \
+__u32 src_qp; \
+__u32 wc_flags; \
+__u16 pkey_index; \
+__u16 slid; \
+__u8 sl; \
+__u8 dlid_path_bits; \
+__u8 port_num; \
+__u8 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_poll_cq_resp struct { \
+__u32 count; \
+__u32 reserved; \
+struct ib_uverbs_wc wc[0]; \
+}
+
+#define _STRUCT_ib_uverbs_req_notify_cq struct { \
+__u32 cq_handle; \
+__u32 solicited_only; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_cq struct { \
+__aligned_u64 response; \
+__u32 cq_handle; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_cq_resp struct { \
+__u32 comp_events_reported; \
+__u32 async_events_reported; \
+}
+
+#define _STRUCT_ib_uverbs_global_route struct { \
+__u8 dgid[16]; \
+__u32 flow_label; \
+__u8 sgid_index; \
+__u8 hop_limit; \
+__u8 traffic_class; \
+__u8 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_ah_attr struct { \
+struct ib_uverbs_global_route grh; \
+__u16 dlid; \
+__u8 sl; \
+__u8 src_path_bits; \
+__u8 static_rate; \
+__u8 is_global; \
+__u8 port_num; \
+__u8 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_qp_attr struct { \
+__u32 qp_attr_mask; \
+__u32 qp_state; \
+__u32 cur_qp_state; \
+__u32 path_mtu; \
+__u32 path_mig_state; \
+__u32 qkey; \
+__u32 rq_psn; \
+__u32 sq_psn; \
+__u32 dest_qp_num; \
+__u32 qp_access_flags; \
+ \
+struct ib_uverbs_ah_attr ah_attr; \
+struct ib_uverbs_ah_attr alt_ah_attr; \
+ \
+ \
+__u32 max_send_wr; \
+__u32 max_recv_wr; \
+__u32 max_send_sge; \
+__u32 max_recv_sge; \
+__u32 max_inline_data; \
+ \
+__u16 pkey_index; \
+__u16 alt_pkey_index; \
+__u8 en_sqd_async_notify; \
+__u8 sq_draining; \
+__u8 max_rd_atomic; \
+__u8 max_dest_rd_atomic; \
+__u8 min_rnr_timer; \
+__u8 port_num; \
+__u8 timeout; \
+__u8 retry_cnt; \
+__u8 rnr_retry; \
+__u8 alt_port_num; \
+__u8 alt_timeout; \
+__u8 reserved[5]; \
+}
+
+#define _STRUCT_ib_uverbs_create_qp struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 send_cq_handle; \
+__u32 recv_cq_handle; \
+__u32 srq_handle; \
+__u32 max_send_wr; \
+__u32 max_recv_wr; \
+__u32 max_send_sge; \
+__u32 max_recv_sge; \
+__u32 max_inline_data; \
+__u8 sq_sig_all; \
+__u8 qp_type; \
+__u8 is_srq; \
+__u8 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_qp struct { \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 send_cq_handle; \
+__u32 recv_cq_handle; \
+__u32 srq_handle; \
+__u32 max_send_wr; \
+__u32 max_recv_wr; \
+__u32 max_send_sge; \
+__u32 max_recv_sge; \
+__u32 max_inline_data; \
+__u8 sq_sig_all; \
+__u8 qp_type; \
+__u8 is_srq; \
+__u8 reserved; \
+__u32 comp_mask; \
+__u32 create_flags; \
+__u32 rwq_ind_tbl_handle; \
+__u32 source_qpn; \
+}
+
+#define _STRUCT_ib_uverbs_open_qp struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 qpn; \
+__u8 qp_type; \
+__u8 reserved[7]; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_create_qp_resp struct { \
+__u32 qp_handle; \
+__u32 qpn; \
+__u32 max_send_wr; \
+__u32 max_recv_wr; \
+__u32 max_send_sge; \
+__u32 max_recv_sge; \
+__u32 max_inline_data; \
+__u32 reserved; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_qp_resp struct { \
+struct ib_uverbs_create_qp_resp base; \
+__u32 comp_mask; \
+__u32 response_length; \
+}
+
+#define _STRUCT_ib_uverbs_qp_dest struct { \
+__u8 dgid[16]; \
+__u32 flow_label; \
+__u16 dlid; \
+__u16 reserved; \
+__u8 sgid_index; \
+__u8 hop_limit; \
+__u8 traffic_class; \
+__u8 sl; \
+__u8 src_path_bits; \
+__u8 static_rate; \
+__u8 is_global; \
+__u8 port_num; \
+}
+
+#define _STRUCT_ib_uverbs_query_qp struct { \
+__aligned_u64 response; \
+__u32 qp_handle; \
+__u32 attr_mask; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_qp_resp struct { \
+struct ib_uverbs_qp_dest dest; \
+struct ib_uverbs_qp_dest alt_dest; \
+__u32 max_send_wr; \
+__u32 max_recv_wr; \
+__u32 max_send_sge; \
+__u32 max_recv_sge; \
+__u32 max_inline_data; \
+__u32 qkey; \
+__u32 rq_psn; \
+__u32 sq_psn; \
+__u32 dest_qp_num; \
+__u32 qp_access_flags; \
+__u16 pkey_index; \
+__u16 alt_pkey_index; \
+__u8 qp_state; \
+__u8 cur_qp_state; \
+__u8 path_mtu; \
+__u8 path_mig_state; \
+__u8 sq_draining; \
+__u8 max_rd_atomic; \
+__u8 max_dest_rd_atomic; \
+__u8 min_rnr_timer; \
+__u8 port_num; \
+__u8 timeout; \
+__u8 retry_cnt; \
+__u8 rnr_retry; \
+__u8 alt_port_num; \
+__u8 alt_timeout; \
+__u8 sq_sig_all; \
+__u8 reserved[5]; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_modify_qp struct { \
+struct ib_uverbs_qp_dest dest; \
+struct ib_uverbs_qp_dest alt_dest; \
+__u32 qp_handle; \
+__u32 attr_mask; \
+__u32 qkey; \
+__u32 rq_psn; \
+__u32 sq_psn; \
+__u32 dest_qp_num; \
+__u32 qp_access_flags; \
+__u16 pkey_index; \
+__u16 alt_pkey_index; \
+__u8 qp_state; \
+__u8 cur_qp_state; \
+__u8 path_mtu; \
+__u8 path_mig_state; \
+__u8 en_sqd_async_notify; \
+__u8 max_rd_atomic; \
+__u8 max_dest_rd_atomic; \
+__u8 min_rnr_timer; \
+__u8 port_num; \
+__u8 timeout; \
+__u8 retry_cnt; \
+__u8 rnr_retry; \
+__u8 alt_port_num; \
+__u8 alt_timeout; \
+__u8 reserved[2]; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_modify_qp struct { \
+struct ib_uverbs_modify_qp base; \
+__u32 rate_limit; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_ex_modify_qp_resp struct { \
+__u32 comp_mask; \
+__u32 response_length; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_qp struct { \
+__aligned_u64 response; \
+__u32 qp_handle; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_qp_resp struct { \
+__u32 events_reported; \
+}
+
+#define _STRUCT_ib_uverbs_sge struct { \
+__aligned_u64 addr; \
+__u32 length; \
+__u32 lkey; \
+}
+
+#define _STRUCT_ib_uverbs_send_wr struct { \
+__aligned_u64 wr_id; \
+__u32 num_sge; \
+__u32 opcode; \
+__u32 send_flags; \
+union { \
+__be32 imm_data; \
+__u32 invalidate_rkey; \
+} ex; \
+union { \
+struct { \
+__aligned_u64 remote_addr; \
+__u32 rkey; \
+__u32 reserved; \
+} rdma; \
+struct { \
+__aligned_u64 remote_addr; \
+__aligned_u64 compare_add; \
+__aligned_u64 swap; \
+__u32 rkey; \
+__u32 reserved; \
+} atomic; \
+struct { \
+__u32 ah; \
+__u32 remote_qpn; \
+__u32 remote_qkey; \
+__u32 reserved; \
+} ud; \
+} wr; \
+}
+
+#define _STRUCT_ib_uverbs_post_send struct { \
+__aligned_u64 response; \
+__u32 qp_handle; \
+__u32 wr_count; \
+__u32 sge_count; \
+__u32 wqe_size; \
+struct ib_uverbs_send_wr send_wr[0]; \
+}
+
+#define _STRUCT_ib_uverbs_post_send_resp struct { \
+__u32 bad_wr; \
+}
+
+#define _STRUCT_ib_uverbs_recv_wr struct { \
+__aligned_u64 wr_id; \
+__u32 num_sge; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_post_recv struct { \
+__aligned_u64 response; \
+__u32 qp_handle; \
+__u32 wr_count; \
+__u32 sge_count; \
+__u32 wqe_size; \
+struct ib_uverbs_recv_wr recv_wr[0]; \
+}
+
+#define _STRUCT_ib_uverbs_post_recv_resp struct { \
+__u32 bad_wr; \
+}
+
+#define _STRUCT_ib_uverbs_post_srq_recv struct { \
+__aligned_u64 response; \
+__u32 srq_handle; \
+__u32 wr_count; \
+__u32 sge_count; \
+__u32 wqe_size; \
+struct ib_uverbs_recv_wr recv[0]; \
+}
+
+#define _STRUCT_ib_uverbs_post_srq_recv_resp struct { \
+__u32 bad_wr; \
+}
+
+#define _STRUCT_ib_uverbs_create_ah struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 reserved; \
+struct ib_uverbs_ah_attr attr; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_create_ah_resp struct { \
+__u32 ah_handle; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_ah struct { \
+__u32 ah_handle; \
+}
+
+#define _STRUCT_ib_uverbs_attach_mcast struct { \
+__u8 gid[16]; \
+__u32 qp_handle; \
+__u16 mlid; \
+__u16 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_detach_mcast struct { \
+__u8 gid[16]; \
+__u32 qp_handle; \
+__u16 mlid; \
+__u16 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_hdr struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+ \
+__aligned_u64 flow_spec_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_flow_eth_filter struct { \
+__u8 dst_mac[6]; \
+__u8 src_mac[6]; \
+__be16 ether_type; \
+__be16 vlan_tag; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_eth struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_eth_filter val; \
+struct ib_uverbs_flow_eth_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_ipv4_filter struct { \
+__be32 src_ip; \
+__be32 dst_ip; \
+__u8 proto; \
+__u8 tos; \
+__u8 ttl; \
+__u8 flags; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_ipv4 struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_ipv4_filter val; \
+struct ib_uverbs_flow_ipv4_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_tcp_udp_filter struct { \
+__be16 dst_port; \
+__be16 src_port; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_tcp_udp struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_tcp_udp_filter val; \
+struct ib_uverbs_flow_tcp_udp_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_ipv6_filter struct { \
+__u8 src_ip[16]; \
+__u8 dst_ip[16]; \
+__be32 flow_label; \
+__u8 next_hdr; \
+__u8 traffic_class; \
+__u8 hop_limit; \
+__u8 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_ipv6 struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_ipv6_filter val; \
+struct ib_uverbs_flow_ipv6_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_action_tag struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+__u32 tag_id; \
+__u32 reserved1; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_action_drop struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_action_handle struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+__u32 handle; \
+__u32 reserved1; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_action_count struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+__u32 handle; \
+__u32 reserved1; \
+}
+
+#define _STRUCT_ib_uverbs_flow_tunnel_filter struct { \
+__be32 tunnel_id; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_tunnel struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_tunnel_filter val; \
+struct ib_uverbs_flow_tunnel_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_esp_filter struct { \
+__u32 spi; \
+__u32 seq; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_esp struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_spec_esp_filter val; \
+struct ib_uverbs_flow_spec_esp_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_gre_filter struct { \
+/* c_ks_res0_ver field is bits 0-15 in offset 0 of a standard GRE header: \
+* bit 0 - C - checksum bit. \
+* bit 1 - reserved. set to 0. \
+* bit 2 - key bit. \
+* bit 3 - sequence number bit. \
+* bits 4:12 - reserved. set to 0. \
+* bits 13:15 - GRE version. \
+*/ \
+__be16 c_ks_res0_ver; \
+__be16 protocol; \
+__be32 key; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_gre struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_gre_filter val; \
+struct ib_uverbs_flow_gre_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_mpls_filter struct { \
+/* The field includes the entire MPLS label: \
+* bits 0:19 - label field. \
+* bits 20:22 - traffic class field. \
+* bit 23 - bottom of stack bit. \
+* bits 24:31 - ttl field. \
+*/ \
+__be32 label; \
+}
+
+#define _STRUCT_ib_uverbs_flow_spec_mpls struct { \
+union { \
+struct ib_uverbs_flow_spec_hdr hdr; \
+struct { \
+__u32 type; \
+__u16 size; \
+__u16 reserved; \
+}; \
+}; \
+struct ib_uverbs_flow_mpls_filter val; \
+struct ib_uverbs_flow_mpls_filter mask; \
+}
+
+#define _STRUCT_ib_uverbs_flow_attr struct { \
+__u32 type; \
+__u16 size; \
+__u16 priority; \
+__u8 num_of_specs; \
+__u8 reserved[2]; \
+__u8 port; \
+__u32 flags; \
+/* Following are the optional layers according to user request \
+* struct ib_flow_spec_xxx \
+* struct ib_flow_spec_yyy \
+*/ \
+struct ib_uverbs_flow_spec_hdr flow_specs[0]; \
+}
+
+#define _STRUCT_ib_uverbs_create_flow struct { \
+__u32 comp_mask; \
+__u32 qp_handle; \
+struct ib_uverbs_flow_attr flow_attr; \
+}
+
+#define _STRUCT_ib_uverbs_create_flow_resp struct { \
+__u32 comp_mask; \
+__u32 flow_handle; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_flow struct { \
+__u32 comp_mask; \
+__u32 flow_handle; \
+}
+
+#define _STRUCT_ib_uverbs_create_srq struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 srq_limit; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_create_xsrq struct { \
+__aligned_u64 response; \
+__aligned_u64 user_handle; \
+__u32 srq_type; \
+__u32 pd_handle; \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 srq_limit; \
+__u32 max_num_tags; \
+__u32 xrcd_handle; \
+__u32 cq_handle; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_create_srq_resp struct { \
+__u32 srq_handle; \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 srqn; \
+__u32 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_modify_srq struct { \
+__u32 srq_handle; \
+__u32 attr_mask; \
+__u32 max_wr; \
+__u32 srq_limit; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_srq struct { \
+__aligned_u64 response; \
+__u32 srq_handle; \
+__u32 reserved; \
+__aligned_u64 driver_data[0]; \
+}
+
+#define _STRUCT_ib_uverbs_query_srq_resp struct { \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 srq_limit; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_srq struct { \
+__aligned_u64 response; \
+__u32 srq_handle; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_destroy_srq_resp struct { \
+__u32 events_reported; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_wq struct { \
+__u32 comp_mask; \
+__u32 wq_type; \
+__aligned_u64 user_handle; \
+__u32 pd_handle; \
+__u32 cq_handle; \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 create_flags; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_wq_resp struct { \
+__u32 comp_mask; \
+__u32 response_length; \
+__u32 wq_handle; \
+__u32 max_wr; \
+__u32 max_sge; \
+__u32 wqn; \
+}
+
+#define _STRUCT_ib_uverbs_ex_destroy_wq struct { \
+__u32 comp_mask; \
+__u32 wq_handle; \
+}
+
+#define _STRUCT_ib_uverbs_ex_destroy_wq_resp struct { \
+__u32 comp_mask; \
+__u32 response_length; \
+__u32 events_reported; \
+__u32 reserved; \
+}
+
+#define _STRUCT_ib_uverbs_ex_modify_wq struct { \
+__u32 attr_mask; \
+__u32 wq_handle; \
+__u32 wq_state; \
+__u32 curr_wq_state; \
+__u32 flags; \
+__u32 flags_mask; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_rwq_ind_table struct { \
+__u32 comp_mask; \
+__u32 log_ind_tbl_size; \
+/* Following are the wq handles according to log_ind_tbl_size \
+* wq_handle1 \
+* wq_handle2 \
+*/ \
+__u32 wq_handles[0]; \
+}
+
+#define _STRUCT_ib_uverbs_ex_create_rwq_ind_table_resp struct { \
+__u32 comp_mask; \
+__u32 response_length; \
+__u32 ind_tbl_handle; \
+__u32 ind_tbl_num; \
+}
+
+#define _STRUCT_ib_uverbs_ex_destroy_rwq_ind_table struct { \
+__u32 comp_mask; \
+__u32 ind_tbl_handle; \
+}
+
+#define _STRUCT_ib_uverbs_cq_moderation struct { \
+__u16 cq_count; \
+__u16 cq_period; \
+}
+
+#define _STRUCT_ib_uverbs_ex_modify_cq struct { \
+__u32 cq_handle; \
+__u32 attr_mask; \
+struct ib_uverbs_cq_moderation attr; \
+__u32 reserved; \
+}
+
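The _STRUCT_* macros above each expand to an anonymous struct body. A plausible
consumption pattern is sketched below, under the assumption that the rc-compat
layer embeds these bodies as anonymous struct members; the wrapper struct and
its header fields are illustrative, as the actual consumer is not part of this
hunk:

    /* Hedged sketch: the macro pastes its member list into the wrapper as
     * an anonymous struct (a C11/GNU feature), so a command wrapper can
     * prepend its own header without repeating the payload layout. */
    struct example_destroy_flow_cmd {
    	__u32 command;                  /* illustrative header fields */
    	__u16 in_words;
    	__u16 out_words;
    	_STRUCT_ib_uverbs_destroy_flow; /* struct { __u32 comp_mask; __u32 flow_handle; }; */
    };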
diff --git a/src/rc-compat/v37/rdma_user_ioctl_cmds.h b/src/rc-compat/v37/rdma_user_ioctl_cmds.h
new file mode 100644
index 000000000000..38ab7accb7be
--- /dev/null
+++ b/src/rc-compat/v37/rdma_user_ioctl_cmds.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_IOCTL_CMDS_H
+#define RDMA_USER_IOCTL_CMDS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Documentation/userspace-api/ioctl/ioctl-number.rst */
+#define RDMA_IOCTL_MAGIC 0x1b
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * Valid output bit should be ignored and considered set in
+ * mandatory fields. This bit is kernel output.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers and IDRs array */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ union {
+ struct {
+ __u8 elem_id;
+ __u8 reserved;
+ } enum_data;
+ __u16 reserved;
+ } attr_data;
+ union {
+ /*
+ * ptr to command, inline data, idr/fd or
+ * ptr to __u32 array of IDRs
+ */
+ __aligned_u64 data;
+ /* Used by FD_IN and FD_OUT */
+ __s64 data_s64;
+ };
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __aligned_u64 reserved1;
+ __u32 driver_id;
+ __u32 reserved2;
+ struct ib_uverbs_attr attrs[0];
+};
+
+#endif
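As a rough illustration of how this ABI is driven from user space, the sketch
below assembles a single-attribute request for RDMA_VERBS_IOCTL. The wrapper
struct and the IDs are illustrative assumptions; a real caller may also need to
populate driver_id, and embedding the zero-length attrs[] array relies on the
GNU extension that permits it:

    #include <string.h>
    #include <sys/ioctl.h>

    struct one_attr_req {
    	struct ib_uverbs_ioctl_hdr hdr;
    	struct ib_uverbs_attr attrs[1];
    };

    static int example_verbs_ioctl(int cmd_fd, __u16 object_id,
    			       __u16 method_id, __u16 attr_id, __u64 data)
    {
    	struct one_attr_req req;

    	memset(&req, 0, sizeof(req));
    	req.hdr.length = sizeof(req);
    	req.hdr.object_id = object_id;
    	req.hdr.method_id = method_id;
    	req.hdr.num_attrs = 1;
    	req.attrs[0].attr_id = attr_id;
    	req.attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
    	req.attrs[0].data = data;

    	return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &req.hdr);
    }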
diff --git a/src/rc-compat/v37/util/cl_qmap.h b/src/rc-compat/v37/util/cl_qmap.h
new file mode 100644
index 000000000000..1a800f2c8fec
--- /dev/null
+++ b/src/rc-compat/v37/util/cl_qmap.h
@@ -0,0 +1,970 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * Abstract:
+ * Declaration of quick map, a binary tree where the caller always provides
+ * all necessary storage.
+ */
+
+#ifndef _CL_QMAP_H_
+#define _CL_QMAP_H_
+
+#include <stdbool.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+typedef struct _cl_list_item {
+ struct _cl_list_item *p_next;
+ struct _cl_list_item *p_prev;
+} cl_list_item_t;
+
+typedef struct _cl_pool_item {
+ cl_list_item_t list_item;
+} cl_pool_item_t;
+
+/****h* Component Library/Quick Map
+* NAME
+* Quick Map
+*
+* DESCRIPTION
+* Quick map implements a binary tree that stores user provided cl_map_item_t
+* structures. Each item stored in a quick map has a unique 64-bit key
+* (duplicates are not allowed). Quick map provides the ability to
+* efficiently search for an item given a key.
+*
+* Quick map does not allocate any memory, and can therefore not fail
+* any operations due to insufficient memory. Quick map can thus be useful
+* in minimizing the error paths in code.
+*
+* Quick map is not thread safe, and users must provide serialization when
+* adding and removing items from the map.
+*
+* The quick map functions operate on a cl_qmap_t structure which should be
+* treated as opaque and should be manipulated only through the provided
+* functions.
+*
+* SEE ALSO
+* Structures:
+* cl_qmap_t, cl_map_item_t, cl_map_obj_t
+*
+* Callbacks:
+* cl_pfn_qmap_apply_t
+*
+* Item Manipulation:
+* cl_qmap_set_obj, cl_qmap_obj, cl_qmap_key
+*
+* Initialization:
+* cl_qmap_init
+*
+* Iteration:
+* cl_qmap_end, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+*
+* Manipulation:
+* cl_qmap_insert, cl_qmap_get, cl_qmap_remove_item, cl_qmap_remove,
+* cl_qmap_remove_all, cl_qmap_merge, cl_qmap_delta, cl_qmap_get_next
+*
+* Search:
+* cl_qmap_apply_func
+*
+* Attributes:
+* cl_qmap_count, cl_is_qmap_empty,
+*********/
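A minimal usage sketch of the contract described above (struct my_obj, the key
value, and the function are illustrative; the caller provides all storage):

    struct my_obj {
    	cl_map_item_t map_item; /* first member, so the item casts back */
    	int payload;
    };

    static void example_qmap_use(cl_qmap_t *map, struct my_obj *obj)
    {
    	cl_map_item_t *item;

    	cl_qmap_init(map);
    	cl_qmap_insert(map, 42 /* key */, &obj->map_item);

    	item = cl_qmap_get(map, 42);
    	if (item != cl_qmap_end(map))
    		((struct my_obj *)item)->payload = 1; /* found */
    }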
+/****i* Component Library: Quick Map/cl_map_color_t
+* NAME
+* cl_map_color_t
+*
+* DESCRIPTION
+* The cl_map_color_t enumerated type is used to note the color of
+* nodes in a map.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_map_color {
+ CL_MAP_RED,
+ CL_MAP_BLACK
+} cl_map_color_t;
+/*
+* VALUES
+* CL_MAP_RED
+* The node in the map is red.
+*
+* CL_MAP_BLACK
+* The node in the map is black.
+*
+* SEE ALSO
+* Quick Map, cl_map_item_t
+*********/
+
+/****s* Component Library: Quick Map/cl_map_item_t
+* NAME
+* cl_map_item_t
+*
+* DESCRIPTION
+* The cl_map_item_t structure is used by maps to store objects.
+*
+* The cl_map_item_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_map_item {
+ /* Must be first to allow casting. */
+ cl_pool_item_t pool_item;
+ struct _cl_map_item *p_left;
+ struct _cl_map_item *p_right;
+ struct _cl_map_item *p_up;
+ cl_map_color_t color;
+ uint64_t key;
+#ifdef _DEBUG_
+ struct _cl_qmap *p_map;
+#endif
+} cl_map_item_t;
+/*
+* FIELDS
+* pool_item
+* Used to store the item in a doubly linked list, allowing more
+* efficient map traversal.
+*
+* p_left
+* Pointer to the map item that is a child to the left of the node.
+*
+* p_right
+* Pointer to the map item that is a child to the right of the node.
+*
+* p_up
+* Pointer to the map item that is the parent of the node.
+*
+* color
+* Indicates whether a node is red or black in the map.
+*
+* key
+* Value that uniquely represents a node in a map. This value is
+* set by calling cl_qmap_insert and can be retrieved by calling
+* cl_qmap_key.
+*
+* NOTES
+* None of the fields of this structure should be manipulated by users, as
+* they are critical to the proper operation of the map in which they
+* are stored.
+*
+* To allow storing items in either a quick list, a quick pool, or a quick
+* map, the map implementation guarantees that the map item can be safely
+* cast to a pool item used for storing an object in a quick pool, or cast
+* to a list item used for storing an object in a quick list. This removes
+* the need to embed a map item, a list item, and a pool item in objects
+* that need to be stored in a quick list, a quick pool, and a quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_insert, cl_qmap_key, cl_pool_item_t, cl_list_item_t
+*********/
+
+/****s* Component Library: Quick Map/cl_map_obj_t
+* NAME
+* cl_map_obj_t
+*
+* DESCRIPTION
+* The cl_map_obj_t structure is used to store objects in maps.
+*
+* The cl_map_obj_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_map_obj {
+ cl_map_item_t item;
+ const void *p_object;
+} cl_map_obj_t;
+/*
+* FIELDS
+* item
+* Map item used internally by the map to store an object.
+*
+* p_object
+* User defined context. Users should not access this field directly.
+* Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the value
+* of this field.
+*
+* NOTES
+* None of the fields of this structure should be manipulated by users, as
+* they are critical to the proper operation of the map in which they
+* are stored.
+*
+* Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the object
+* stored in a map item, respectively.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_set_obj, cl_qmap_obj, cl_map_item_t
+*********/
+
+/****s* Component Library: Quick Map/cl_qmap_t
+* NAME
+* cl_qmap_t
+*
+* DESCRIPTION
+* Quick map structure.
+*
+* The cl_qmap_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_qmap {
+ cl_map_item_t root;
+ cl_map_item_t nil;
+ size_t count;
+} cl_qmap_t;
+/*
+* PARAMETERS
+* root
+* Map item that serves as root of the map. The root is set up to
+* always have itself as parent. The left pointer is set to point
+* to the item at the root.
+*
+* nil
+* Map item that serves as terminator for all leaves, as well as
+* providing the list item used as quick list for storing map items
+* in a list for faster traversal.
+*
+* count
+* Number of items in the map.
+*
+* SEE ALSO
+* Quick Map
+*********/
+
+/****d* Component Library: Quick Map/cl_pfn_qmap_apply_t
+* NAME
+* cl_pfn_qmap_apply_t
+*
+* DESCRIPTION
+* The cl_pfn_qmap_apply_t function type defines the prototype for
+* functions used to iterate items in a quick map.
+*
+* SYNOPSIS
+*/
+typedef void
+ (*cl_pfn_qmap_apply_t) (cl_map_item_t * const p_map_item, void *context);
+/*
+* PARAMETERS
+* p_map_item
+* [in] Pointer to a cl_map_item_t structure.
+*
+* context
+* [in] Value passed to the callback function.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function type is provided as function prototype reference for the
+* function provided by users as a parameter to the cl_qmap_apply_func
+* function.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_apply_func
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_count
+* NAME
+* cl_qmap_count
+*
+* DESCRIPTION
+* The cl_qmap_count function returns the number of items stored
+* in a quick map.
+*
+* SYNOPSIS
+*/
+static inline uint32_t cl_qmap_count(const cl_qmap_t * const p_map)
+{
+ assert(p_map);
+ return ((uint32_t) p_map->count);
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure whose item count to return.
+*
+* RETURN VALUE
+* Returns the number of items stored in the map.
+*
+* SEE ALSO
+* Quick Map, cl_is_qmap_empty
+*********/
+
+/****f* Component Library: Quick Map/cl_is_qmap_empty
+* NAME
+* cl_is_qmap_empty
+*
+* DESCRIPTION
+* The cl_is_qmap_empty function returns whether a quick map is empty.
+*
+* SYNOPSIS
+*/
+static inline bool cl_is_qmap_empty(const cl_qmap_t * const p_map)
+{
+ assert(p_map);
+
+ return (p_map->count == 0);
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure to test for emptiness.
+*
+* RETURN VALUES
+* TRUE if the quick map is empty.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_count, cl_qmap_remove_all
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_set_obj
+* NAME
+* cl_qmap_set_obj
+*
+* DESCRIPTION
+* The cl_qmap_set_obj function sets the object stored in a map object.
+*
+* SYNOPSIS
+*/
+static inline void
+cl_qmap_set_obj(cl_map_obj_t * const p_map_obj,
+ const void *const p_object)
+{
+ assert(p_map_obj);
+ p_map_obj->p_object = p_object;
+}
+
+/*
+* PARAMETERS
+* p_map_obj
+* [in] Pointer to a map object structure whose object pointer
+* is to be set.
+*
+* p_object
+* [in] User defined context.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_obj
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_obj
+* NAME
+* cl_qmap_obj
+*
+* DESCRIPTION
+* The cl_qmap_obj function returns the object stored in a map object.
+*
+* SYNOPSIS
+*/
+static inline void *cl_qmap_obj(const cl_map_obj_t * const p_map_obj)
+{
+ assert(p_map_obj);
+ return ((void *)p_map_obj->p_object);
+}
+
+/*
+* PARAMETERS
+* p_map_obj
+* [in] Pointer to a map object structure whose object pointer to return.
+*
+* RETURN VALUE
+* Returns the value of the object pointer stored in the map object.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_set_obj
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_key
+* NAME
+* cl_qmap_key
+*
+* DESCRIPTION
+* The cl_qmap_key function retrieves the key value of a map item.
+*
+* SYNOPSIS
+*/
+static inline uint64_t cl_qmap_key(const cl_map_item_t * const p_item)
+{
+ assert(p_item);
+ return (p_item->key);
+}
+
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a map item whose key value to return.
+*
+* RETURN VALUE
+* Returns the 64-bit key value for the specified map item.
+*
+* NOTES
+* The key value is set in a call to cl_qmap_insert.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_insert
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_init
+* NAME
+* cl_qmap_init
+*
+* DESCRIPTION
+* The cl_qmap_init function initializes a quick map for use.
+*
+* SYNOPSIS
+*/
+void cl_qmap_init(cl_qmap_t * const p_map);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure to initialize.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* Allows calling quick map manipulation functions.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_insert, cl_qmap_remove
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_end
+* NAME
+* cl_qmap_end
+*
+* DESCRIPTION
+* The cl_qmap_end function returns the end of a quick map.
+*
+* SYNOPSIS
+*/
+static inline const cl_map_item_t *cl_qmap_end(const cl_qmap_t * const p_map)
+{
+ assert(p_map);
+ /* Nil is the end of the map. */
+ return (&p_map->nil);
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure whose end to return.
+*
+* RETURN VALUE
+* Pointer to the end of the map.
+*
+* NOTES
+* cl_qmap_end is useful for determining the validity of map items returned
+* by cl_qmap_head, cl_qmap_tail, cl_qmap_next, or cl_qmap_prev. If the
+* map item pointer returned by any of these functions compares equal to the end,
+* the end of the map was encountered.
+* When using cl_qmap_head or cl_qmap_tail, this condition indicates that
+* the map is empty.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_head
+* NAME
+* cl_qmap_head
+*
+* DESCRIPTION
+* The cl_qmap_head function returns the map item with the lowest key
+* value stored in a quick map.
+*
+* SYNOPSIS
+*/
+static inline cl_map_item_t *cl_qmap_head(const cl_qmap_t * const p_map)
+{
+ assert(p_map);
+ return ((cl_map_item_t *) p_map->nil.pool_item.list_item.p_next);
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure whose item with the lowest
+* key is returned.
+*
+* RETURN VALUES
+* Pointer to the map item with the lowest key in the quick map.
+*
+* Pointer to the map end if the quick map was empty.
+*
+* NOTES
+* cl_qmap_head does not remove the item from the map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_tail, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+* cl_qmap_item_t
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_tail
+* NAME
+* cl_qmap_tail
+*
+* DESCRIPTION
+* The cl_qmap_tail function returns the map item with the highest key
+* value stored in a quick map.
+*
+* SYNOPSIS
+*/
+static inline cl_map_item_t *cl_qmap_tail(const cl_qmap_t * const p_map)
+{
+ assert(p_map);
+ return ((cl_map_item_t *) p_map->nil.pool_item.list_item.p_prev);
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure whose item with the
+* highest key is returned.
+*
+* RETURN VALUES
+* Pointer to the map item with the highest key in the quick map.
+*
+* Pointer to the map end if the quick map was empty.
+*
+* NOTES
+* cl_qmap_tail does not remove the item from the map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_head, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+* cl_qmap_item_t
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_next
+* NAME
+* cl_qmap_next
+*
+* DESCRIPTION
+* The cl_qmap_next function returns the map item with the next higher
+* key value than a specified map item.
+*
+* SYNOPSIS
+*/
+static inline cl_map_item_t *cl_qmap_next(const cl_map_item_t * const p_item)
+{
+ assert(p_item);
+ return ((cl_map_item_t *) p_item->pool_item.list_item.p_next);
+}
+
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a map item whose successor to return.
+*
+* RETURN VALUES
+* Pointer to the map item with the next higher key value in a quick map.
+*
+* Pointer to the map end if the specified item was the last item in
+* the quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_prev, cl_qmap_end,
+* cl_map_item_t
+*********/
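Taken together with cl_qmap_head and cl_qmap_end, this supports an
ascending-order walk; a short sketch (handle_item is a hypothetical per-item
handler):

    static void example_walk(const cl_qmap_t *map)
    {
    	cl_map_item_t *it;

    	for (it = cl_qmap_head(map); it != cl_qmap_end(map);
    	     it = cl_qmap_next(it))
    		handle_item(it); /* hypothetical callback */
    }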
+
+/****f* Component Library: Quick Map/cl_qmap_prev
+* NAME
+* cl_qmap_prev
+*
+* DESCRIPTION
+* The cl_qmap_prev function returns the map item with the next lower
+* key value than a specified map item.
+*
+* SYNOPSIS
+*/
+static inline cl_map_item_t *cl_qmap_prev(const cl_map_item_t * const p_item)
+{
+ assert(p_item);
+ return ((cl_map_item_t *) p_item->pool_item.list_item.p_prev);
+}
+
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a map item whose predecessor to return.
+*
+* RETURN VALUES
+* Pointer to the map item with the next lower key value in a quick map.
+*
+* Pointer to the map end if the specified item was the first item in
+* the quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_end,
+* cl_map_item_t
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_insert
+* NAME
+* cl_qmap_insert
+*
+* DESCRIPTION
+* The cl_qmap_insert function inserts a map item into a quick map.
+* NOTE: the item is inserted only if the key does not already exist in the map!
+*
+* SYNOPSIS
+*/
+cl_map_item_t *cl_qmap_insert(cl_qmap_t * const p_map,
+ const uint64_t key,
+ cl_map_item_t * const p_item);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure into which to add the item.
+*
+* key
+* [in] Value to assign to the item.
+*
+* p_item
+* [in] Pointer to a cl_map_item_t structure to insert into the quick map.
+*
+* RETURN VALUE
+* Pointer to the item in the map with the specified key. If insertion
+* was successful, this is the pointer to the item. If an item with the
+* specified key already exists in the map, the pointer to that item is
+* returned - but the new key is NOT inserted...
+*
+* NOTES
+* Insertion operations may cause the quick map to rebalance.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_remove, cl_map_item_t
+*********/
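Because duplicates are rejected, the return value doubles as a collision
check. A sketch, reusing struct my_obj from the earlier example:

    static bool example_insert(cl_qmap_t *map, uint64_t key,
    			   struct my_obj *obj)
    {
    	cl_map_item_t *ret = cl_qmap_insert(map, key, &obj->map_item);

    	/* false means the key was already present and obj was NOT
    	 * inserted; ret then points at the pre-existing item. */
    	return ret == &obj->map_item;
    }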
+
+/****f* Component Library: Quick Map/cl_qmap_get
+* NAME
+* cl_qmap_get
+*
+* DESCRIPTION
+* The cl_qmap_get function returns the map item associated with a key.
+*
+* SYNOPSIS
+*/
+cl_map_item_t *cl_qmap_get(const cl_qmap_t * const p_map,
+ const uint64_t key);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure from which to retrieve the
+* item with the specified key.
+*
+* key
+* [in] Key value used to search for the desired map item.
+*
+* RETURN VALUES
+* Pointer to the map item with the desired key value.
+*
+* Pointer to the map end if there was no item with the desired key value
+* stored in the quick map.
+*
+* NOTES
+* cl_qmap_get does not remove the item from the quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_get_next, cl_qmap_remove
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_get_next
+* NAME
+* cl_qmap_get_next
+*
+* DESCRIPTION
+* The cl_qmap_get_next function returns the first map item associated with a
+* key > the key specified.
+*
+* SYNOPSIS
+*/
+cl_map_item_t *cl_qmap_get_next(const cl_qmap_t * const p_map,
+ const uint64_t key);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure from which to retrieve the
+* first item with a key > the specified key.
+*
+* key
+* [in] Key value used to search for the desired map item.
+*
+* RETURN VALUES
+* Pointer to the first map item with a key > the desired key value.
+*
+* Pointer to the map end if there was no item with a key > the desired key
+* value stored in the quick map.
+*
+* NOTES
+* cl_qmap_get_next does not remove the item from the quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_get, cl_qmap_remove
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_remove_item
+* NAME
+* cl_qmap_remove_item
+*
+* DESCRIPTION
+* The cl_qmap_remove_item function removes the specified map item
+* from a quick map.
+*
+* SYNOPSIS
+*/
+void
+cl_qmap_remove_item(cl_qmap_t * const p_map,
+ cl_map_item_t * const p_item);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure from which to
+* remove item.
+*
+* p_item
+* [in] Pointer to a map item to remove from its quick map.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* In a debug build, cl_qmap_remove_item asserts that the item being removed
+* is in the specified map.
+*
+* NOTES
+* Removes the map item pointed to by p_item from its quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_remove, cl_qmap_remove_all, cl_qmap_insert
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_remove
+* NAME
+* cl_qmap_remove
+*
+* DESCRIPTION
+* The cl_qmap_remove function removes the map item with the specified key
+* from a quick map.
+*
+* SYNOPSIS
+*/
+cl_map_item_t *cl_qmap_remove(cl_qmap_t * const p_map,
+ const uint64_t key);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure from which to remove the item
+* with the specified key.
+*
+* key
+* [in] Key value used to search for the map item to remove.
+*
+* RETURN VALUES
+* Pointer to the removed map item if it was found.
+*
+* Pointer to the map end if no item with the specified key exists in the
+* quick map.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_remove_item, cl_qmap_remove_all, cl_qmap_insert
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_remove_all
+* NAME
+* cl_qmap_remove_all
+*
+* DESCRIPTION
+* The cl_qmap_remove_all function removes all items in a quick map,
+* leaving it empty.
+*
+* SYNOPSIS
+*/
+static inline void cl_qmap_remove_all(cl_qmap_t * const p_map)
+{
+ assert(p_map);
+
+ p_map->root.p_left = &p_map->nil;
+ p_map->nil.pool_item.list_item.p_next = &p_map->nil.pool_item.list_item;
+ p_map->nil.pool_item.list_item.p_prev = &p_map->nil.pool_item.list_item;
+ p_map->count = 0;
+}
+
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure to empty.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_remove, cl_qmap_remove_item
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_merge
+* NAME
+* cl_qmap_merge
+*
+* DESCRIPTION
+* The cl_qmap_merge function moves all items from one map to another,
+* excluding duplicates.
+*
+* SYNOPSIS
+*/
+void
+cl_qmap_merge(cl_qmap_t * const p_dest_map,
+ cl_qmap_t * const p_src_map);
+/*
+* PARAMETERS
+* p_dest_map
+* [out] Pointer to a cl_qmap_t structure to which items should be added.
+*
+* p_src_map
+* [in/out] Pointer to a cl_qmap_t structure whose items to add
+* to p_dest_map.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* Items are evaluated based on their keys only.
+*
+* Upon return from cl_qmap_merge, the quick map referenced by p_src_map
+* contains all duplicate items.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_delta
+*********/
+
+/****f* Component Library: Quick Map/cl_qmap_delta
+* NAME
+* cl_qmap_delta
+*
+* DESCRIPTION
+* The cl_qmap_delta function computes the differences between two maps.
+*
+* SYNOPSIS
+*/
+void
+cl_qmap_delta(cl_qmap_t * const p_map1,
+ cl_qmap_t * const p_map2,
+ cl_qmap_t * const p_new, cl_qmap_t * const p_old);
+/*
+* PARAMETERS
+* p_map1
+* [in/out] Pointer to the first of two cl_qmap_t structures whose
+* differences to compute.
+*
+* p_map2
+* [in/out] Pointer to the second of two cl_qmap_t structures whose
+* differences to compute.
+*
+* p_new
+* [out] Pointer to an empty cl_qmap_t structure that contains the
+* items unique to p_map2 upon return from the function.
+*
+* p_old
+* [out] Pointer to an empty cl_qmap_t structure that contains the
+* items unique to p_map1 upon return from the function.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* Items are evaluated based on their keys. Items that exist in both
+* p_map1 and p_map2 remain in their respective maps. Items that
+* exist only in p_map1 are moved to p_old. Likewise, items that exist only
+* in p_map2 are moved to p_new. This function can be useful in evaluating
+* changes between two maps.
+*
+* Both maps pointed to by p_new and p_old must be empty on input. This
+* requirement removes the possibility of failures.
+*
+* SEE ALSO
+* Quick Map, cl_qmap_merge
+*********/
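A sketch of the typical snapshot-diff use (the map names are illustrative):

    static void example_diff(cl_qmap_t *old_map, cl_qmap_t *new_map)
    {
    	cl_qmap_t added, removed;

    	cl_qmap_init(&added);
    	cl_qmap_init(&removed);
    	cl_qmap_delta(old_map, new_map, &added, &removed);
    	/* 'added' now holds the items found only in new_map, 'removed'
    	 * those found only in old_map; common items stay where they were. */
    }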
+
+/****f* Component Library: Quick Map/cl_qmap_apply_func
+* NAME
+* cl_qmap_apply_func
+*
+* DESCRIPTION
+* The cl_qmap_apply_func function executes a specified function
+* for every item stored in a quick map.
+*
+* SYNOPSIS
+*/
+void
+cl_qmap_apply_func(const cl_qmap_t * const p_map,
+ cl_pfn_qmap_apply_t pfn_func,
+ const void *const context);
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_qmap_t structure.
+*
+* pfn_func
+* [in] Function invoked for every item in the quick map.
+* See the cl_pfn_qmap_apply_t function type declaration for
+* details about the callback function.
+*
+* context
+* [in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* The function provided must not perform any map operations, as these
+* would corrupt the quick map.
+*
+* SEE ALSO
+* Quick Map, cl_pfn_qmap_apply_t
+*********/
+
+#endif /* _CL_QMAP_H_ */
diff --git a/src/rc-compat/v37/util/compiler.h b/src/rc-compat/v37/util/compiler.h
new file mode 100644
index 000000000000..dfce82f18841
--- /dev/null
+++ b/src/rc-compat/v37/util/compiler.h
@@ -0,0 +1,54 @@
+/* GPLv2 or OpenIB.org BSD (MIT) See COPYING file */
+#ifndef UTIL_COMPILER_H
+#define UTIL_COMPILER_H
+
+/* Use to tag a variable that causes compiler warnings. Use as:
+ int uninitialized_var(sz)
+
+ This is only enabled for old compilers. gcc 6.x and beyond have excellent
+ static flow analysis. If code solicits a warning from 6.x it is almost
+ certainly too complex for a human to understand. For some reason powerpc
+ uses a different scheme than gcc for flow analysis.
+*/
+#if (__GNUC__ >= 6 && !defined(__powerpc__)) || defined(__clang__)
+#define uninitialized_var(x) x
+#else
+#define uninitialized_var(x) x = x
+#endif
+
+#ifndef likely
+#ifdef __GNUC__
+#define likely(x) __builtin_expect(!!(x), 1)
+#else
+#define likely(x) (x)
+#endif
+#endif
+
+#ifndef unlikely
+#ifdef __GNUC__
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else
+#define unlikely(x) (x)
+#endif
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_ALWAYS_INLINE
+#define ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define ALWAYS_INLINE
+#endif
+
+/* Use to mark fall through on switch statements as desired. */
+#if __GNUC__ >= 7
+#define SWITCH_FALLTHROUGH __attribute__ ((fallthrough))
+#else
+#define SWITCH_FALLTHROUGH
+#endif
+
+#ifdef __CHECKER__
+# define __force __attribute__((force))
+#else
+# define __force
+#endif
+
+#endif
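A short sketch of how these annotations are typically applied (the function
and its logic are illustrative):

    static int example_parse(int c, int *state)
    {
    	if (unlikely(state == NULL))
    		return -1;

    	switch (c) {
    	case 'a':
    		*state = 1;
    		SWITCH_FALLTHROUGH; /* deliberate fall through to 'b' */
    	case 'b':
    		(*state)++;
    		break;
    	default:
    		break;
    	}
    	return 0;
    }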
diff --git a/src/rc-compat/v37/util/mmio.h b/src/rc-compat/v37/util/mmio.h
new file mode 100644
index 000000000000..101af9dd332d
--- /dev/null
+++ b/src/rc-compat/v37/util/mmio.h
@@ -0,0 +1,267 @@
+/* GPLv2 or OpenIB.org BSD (MIT) See COPYING file
+
+ These accessors always map to PCI-E TLPs in predictable ways. Translation
+ to other buses should follow similar definitions.
+
+ write32(mem, 1)
+ Produce a 4 byte MemWr TLP with bit 0 of DW byte offset 0 set
+ write32_be(mem, htobe32(1))
+ Produce a 4 byte MemWr TLP with bit 0 of DW byte offset 3 set
+ write32_le(mem, htole32(1))
+ Produce a 4 byte MemWr TLP with bit 0 of DW byte offset 0 set
+
+ For ordering these accessors are similar to the Kernel's concept of
+ writel_relaxed(). When working with UC memory the following hold:
+
+ 1) Strong ordering is required when talking to the same device (eg BAR),
+ and combining is not permitted:
+
+ write32(mem, 1);
+ write32(mem + 4, 1);
+ write32(mem, 1);
+
+ Must produce three TLPs, in order.
+
+ 2) Ordering ignores all pthread locking:
+
+ pthread_spin_lock(&lock);
+ write32(mem, global++);
+ pthread_spin_unlock(&lock);
+
+ When run concurrently on all CPUs the device must observe all stores,
+ but the data value will not be strictly increasing.
+
+ 3) Interaction with DMA is not ordered. Explicit use of a barrier from
+ udma_barriers is required:
+
+ *dma_mem = 1;
+ udma_to_device_barrier();
+ write32(mem, GO_DMA);
+
+ 4) Access out of program order (eg speculation), either by the CPU or
+ compiler is not permitted:
+
+ if (cond)
+ read32();
+
+ Must not issue a read TLP if cond is false.
+
+ If these are used with WC memory then #1 and #4 do not apply, and all WC
+ accesses must be bracketed with mmio_wc_start() // mmio_flush_writes()
+*/
+
+#ifndef __UTIL_MMIO_H
+#define __UTIL_MMIO_H
+
+#include <linux/types.h>
+#include <stdatomic.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <endian.h>
+
+#include <config.h>
+#include <util/compiler.h>
+
+/* The first step is to define the 'raw' accessors. To make this very safe
+ with sparse we define two versions of each, a le and a be - however the
+ code is always identical.
+*/
+#ifdef __s390x__
+#include <unistd.h>
+#include <sys/syscall.h>
+
+/* s390 requires a privileged instruction to access IO memory; these syscalls
+ perform that instruction using a memory buffer copy semantic.
+*/
+static inline void s390_mmio_write(void *mmio_addr, const void *val,
+ size_t length)
+{
+ // FIXME: Check for error and call abort?
+ syscall(__NR_s390_pci_mmio_write, mmio_addr, val, length);
+}
+
+static inline void s390_mmio_read(const void *mmio_addr, void *val,
+ size_t length)
+{
+ // FIXME: Check for error and call abort?
+ syscall(__NR_s390_pci_mmio_read, mmio_addr, val, length);
+}
+
+#define MAKE_WRITE(_NAME_, _SZ_) \
+ static inline void _NAME_##_be(void *addr, __be##_SZ_ value) \
+ { \
+ s390_mmio_write(addr, &value, sizeof(value)); \
+ } \
+ static inline void _NAME_##_le(void *addr, __le##_SZ_ value) \
+ { \
+ s390_mmio_write(addr, &value, sizeof(value)); \
+ }
+#define MAKE_READ(_NAME_, _SZ_) \
+ static inline __be##_SZ_ _NAME_##_be(const void *addr) \
+ { \
+ __be##_SZ_ res; \
+ s390_mmio_read(addr, &res, sizeof(res)); \
+ return res; \
+ } \
+ static inline __le##_SZ_ _NAME_##_le(const void *addr) \
+ { \
+ __le##_SZ_ res; \
+ s390_mmio_read(addr, &res, sizeof(res)); \
+ return res; \
+ }
+
+static inline void mmio_write8(void *addr, uint8_t value)
+{
+ s390_mmio_write(addr, &value, sizeof(value));
+}
+
+static inline uint8_t mmio_read8(const void *addr)
+{
+ uint8_t res;
+ s390_mmio_read(addr, &res, sizeof(res));
+ return res;
+}
+
+#else /* __s390x__ */
+
+#define MAKE_WRITE(_NAME_, _SZ_) \
+ static inline void _NAME_##_be(void *addr, __be##_SZ_ value) \
+ { \
+ atomic_store_explicit((_Atomic(uint##_SZ_##_t) *)addr, \
+ (__force uint##_SZ_##_t)value, \
+ memory_order_relaxed); \
+ } \
+ static inline void _NAME_##_le(void *addr, __le##_SZ_ value) \
+ { \
+ atomic_store_explicit((_Atomic(uint##_SZ_##_t) *)addr, \
+ (__force uint##_SZ_##_t)value, \
+ memory_order_relaxed); \
+ }
+#define MAKE_READ(_NAME_, _SZ_) \
+ static inline __be##_SZ_ _NAME_##_be(const void *addr) \
+ { \
+ return (__force __be##_SZ_)atomic_load_explicit( \
+ (_Atomic(uint##_SZ_##_t) *)addr, memory_order_relaxed); \
+ } \
+ static inline __le##_SZ_ _NAME_##_le(const void *addr) \
+ { \
+ return (__force __le##_SZ_)atomic_load_explicit( \
+ (_Atomic(uint##_SZ_##_t) *)addr, memory_order_relaxed); \
+ }
+
+static inline void mmio_write8(void *addr, uint8_t value)
+{
+ atomic_store_explicit((_Atomic(uint8_t) *)addr, value,
+ memory_order_relaxed);
+}
+static inline uint8_t mmio_read8(const void *addr)
+{
+	return atomic_load_explicit((_Atomic(uint8_t) *)addr,
+ memory_order_relaxed);
+}
+#endif /* __s390x__ */
+
+MAKE_WRITE(mmio_write16, 16)
+MAKE_WRITE(mmio_write32, 32)
+
+MAKE_READ(mmio_read16, 16)
+MAKE_READ(mmio_read32, 32)
+
+#if SIZEOF_LONG == 8
+MAKE_WRITE(mmio_write64, 64)
+MAKE_READ(mmio_read64, 64)
+#else
+void mmio_write64_be(void *addr, __be64 val);
+static inline void mmio_write64_le(void *addr, __le64 val)
+{
+ mmio_write64_be(addr, (__be64 __force)val);
+}
+
+/* There is no way to do read64 atomically, rather than provide some sketchy
+ implementation we leave these functions undefined, users should not call
+ them if SIZEOF_LONG != 8, but instead implement an appropriate version.
+*/
+__be64 mmio_read64_be(const void *addr);
+__le64 mmio_read64_le(const void *addr);
+#endif /* SIZEOF_LONG == 8 */
+
+#undef MAKE_WRITE
+#undef MAKE_READ
+
+/* Now we can define the host endian versions of the operators; these just
+   add a byte swap to/from little endian around the raw accessors.
+*/
+#define MAKE_WRITE(_NAME_, _SZ_) \
+ static inline void _NAME_(void *addr, uint##_SZ_##_t value) \
+ { \
+ _NAME_##_le(addr, htole##_SZ_(value)); \
+ }
+#define MAKE_READ(_NAME_, _SZ_) \
+ static inline uint##_SZ_##_t _NAME_(const void *addr) \
+ { \
+ return le##_SZ_##toh(_NAME_##_le(addr)); \
+ }
+
+/* This strictly guarantees that the TLPs for the memory copy are generated
+   in ascending address order.
+*/
+#ifdef __s390x__
+static inline void mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
+{
+ s390_mmio_write(dest, src, bytecnt);
+}
+#else
+
+/* Transfer is some multiple of 64 bytes */
+static inline void mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
+{
+ uintptr_t *dst_p = dest;
+
+ /* Caller must guarantee:
+ assert(bytecnt != 0);
+ assert((bytecnt % 64) == 0);
+	 assert(((uintptr_t)dest) % __alignof__(*dst_p) == 0);
+	 assert(((uintptr_t)src) % __alignof__(*dst_p) == 0);
+ */
+
+ /* Use the native word size for the copy */
+ if (sizeof(*dst_p) == 8) {
+ const __be64 *src_p = src;
+
+ do {
+ /* Do 64 bytes at a time */
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+ mmio_write64_be(dst_p++, *src_p++);
+
+ bytecnt -= 8 * sizeof(*dst_p);
+ } while (bytecnt > 0);
+ } else if (sizeof(*dst_p) == 4) {
+ const __be32 *src_p = src;
+
+ do {
+ mmio_write32_be(dst_p++, *src_p++);
+ mmio_write32_be(dst_p++, *src_p++);
+ bytecnt -= 2 * sizeof(*dst_p);
+ } while (bytecnt > 0);
+ }
+}
+#endif
+
+MAKE_WRITE(mmio_write16, 16)
+MAKE_WRITE(mmio_write32, 32)
+MAKE_WRITE(mmio_write64, 64)
+
+MAKE_READ(mmio_read16, 16)
+MAKE_READ(mmio_read32, 32)
+MAKE_READ(mmio_read64, 64)
+
+#undef MAKE_WRITE
+#undef MAKE_READ
+
+#endif
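A hedged sketch of the doorbell pattern these accessors enable; the BAR offset
is an illustrative stand-in for a device-specific value, and
udma_to_device_barrier() comes from util/udma_barrier.h below:

    #define EXAMPLE_DB_OFFSET 0x40 /* illustrative, device specific */

    static void example_ring_doorbell(uint8_t *bar, uint32_t val)
    {
    	udma_to_device_barrier(); /* publish WQE writes before the doorbell */
    	mmio_write32(bar + EXAMPLE_DB_OFFSET, val);
    }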
diff --git a/src/rc-compat/v37/util/node_name_map.h b/src/rc-compat/v37/util/node_name_map.h
new file mode 100644
index 000000000000..e78d274b116e
--- /dev/null
+++ b/src/rc-compat/v37/util/node_name_map.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ *
+ * Connect to opensm's cl_nodenamemap.h if it is available.
+ */
+#ifndef __LIBUTIL_NODE_NAME_MAP_H__
+#define __LIBUTIL_NODE_NAME_MAP_H__
+
+#include <stdint.h>
+
+struct nn_map;
+typedef struct nn_map nn_map_t;
+
+nn_map_t *open_node_name_map(const char *node_name_map);
+void close_node_name_map(nn_map_t *map);
+/* NOTE: parameter "nodedesc" may be modified here. */
+char *remap_node_name(nn_map_t *map, uint64_t target_guid, char *nodedesc);
+char *clean_nodedesc(char *nodedesc);
+
+#endif
diff --git a/src/rc-compat/v37/util/rdma_nl.h b/src/rc-compat/v37/util/rdma_nl.h
new file mode 100644
index 000000000000..9c0916978283
--- /dev/null
+++ b/src/rc-compat/v37/util/rdma_nl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef UTIL_RDMA_NL_H
+#define UTIL_RDMA_NL_H
+
+#include <stdbool.h>
+
+#include <rdma/rdma_netlink.h>
+#include <netlink/netlink.h>
+#include <netlink/msg.h>
+#include <netlink/attr.h>
+
+extern struct nla_policy rdmanl_policy[RDMA_NLDEV_ATTR_MAX];
+struct nl_sock *rdmanl_socket_alloc(void);
+int rdmanl_get_devices(struct nl_sock *nl, nl_recvmsg_msg_cb_t cb_func,
+ void *data);
+int rdmanl_get_chardev(struct nl_sock *nl, int ibidx, const char *name,
+ nl_recvmsg_msg_cb_t cb_func, void *data);
+bool get_copy_on_fork(void);
+int rdmanl_get_copy_on_fork(struct nl_sock *nl, nl_recvmsg_msg_cb_t cb_func,
+ void *data);
+
+#endif
diff --git a/src/rc-compat/v37/util/symver.h b/src/rc-compat/v37/util/symver.h
new file mode 100644
index 000000000000..eb14c57ebdc1
--- /dev/null
+++ b/src/rc-compat/v37/util/symver.h
@@ -0,0 +1,107 @@
+/* GPLv2 or OpenIB.org BSD (MIT) See COPYING file
+
+ These definitions help using the ELF symbol version feature, and must be
+ used in conjunction with the library's map file.
+ */
+
+#ifndef __UTIL_SYMVER_H
+#define __UTIL_SYMVER_H
+
+#include <config.h>
+#include <ccan/str.h>
+
+/*
+ These macros should only be used if the library is defining compatibility
+ symbols, eg:
+
+ 213: 000000000000a650 315 FUNC GLOBAL DEFAULT 13 ibv_get_device_list@IBVERBS_1.0
+ 214: 000000000000b020 304 FUNC GLOBAL DEFAULT 13 ibv_get_device_list@@IBVERBS_1.1
+
+ Symbols which have only a single implementation should use a normal extern
+ function and be placed in the correct stanza in the linker map file.
+
+ Follow this pattern to use this feature:
+ public.h:
+ struct ibv_device **ibv_get_device_list(int *num_devices);
+ foo.c:
+ // Implement the latest version
+ LATEST_SYMVER_FUNC(ibv_get_device_list, 1_1, "IBVERBS_1.1",
+ struct ibv_device **,
+ int *num_devices)
+ {
+ ...
+ }
+
+ // Implement the compat version
+ COMPAT_SYMVER_FUNC(ibv_get_device_list, 1_0, "IBVERBS_1.0",
+ struct ibv_device_1_0 **,
+ int *num_devices)
+ {
+ ...
+ }
+
+ As well as matching information in the map file.
+
+ These macros deal with the various ugliness in gcc surrounding symbol
+ versions
+
+ - The internal name __public_1_x is synthesized by the macro
+ - A prototype for the internal name is created by the macro
+ - If statically linking the latest symbol expands into a normal function
+ definition
+ - If statically linking the compat symbols expand into unused static
+ functions that are discarded by the compiler.
+ - The prototype of the latest symbol is checked against the public
+ prototype (only when compiling statically)
+
+ The extra prototypes are included only to avoid -Wmissing-prototypes
+ warnings. See also Documentation/versioning.md
+*/
+
+#if HAVE_FUNC_ATTRIBUTE_SYMVER
+#define _MAKE_SYMVER(_local_sym, _public_sym, _ver_str) \
+ __attribute__((__symver__(#_public_sym "@" _ver_str)))
+#else
+#define _MAKE_SYMVER(_local_sym, _public_sym, _ver_str) \
+ asm(".symver " #_local_sym "," #_public_sym "@" _ver_str);
+#endif
+#define _MAKE_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ _ret __##_public_sym##_##_uniq(__VA_ARGS__); \
+ _MAKE_SYMVER(__##_public_sym##_##_uniq, _public_sym, _ver_str) \
+ _ret __##_public_sym##_##_uniq(__VA_ARGS__)
+
+#if defined(HAVE_FULL_SYMBOL_VERSIONS) && !defined(_STATIC_LIBRARY_BUILD_)
+
+ // Produce all symbol versions for dynamic linking
+
+# define COMPAT_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ _MAKE_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, __VA_ARGS__)
+# define LATEST_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ _MAKE_SYMVER_FUNC(_public_sym, _uniq, "@" _ver_str, _ret, __VA_ARGS__)
+
+#elif defined(HAVE_LIMITED_SYMBOL_VERSIONS) && !defined(_STATIC_LIBRARY_BUILD_)
+
+ /* Produce only implementations for the latest symbol and tag it with the
+ * correct symbol versions. This supports dynamic linkers that do not
+ * understand symbol versions
+ */
+# define COMPAT_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ static inline _ret __##_public_sym##_##_uniq(__VA_ARGS__)
+# define LATEST_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ _MAKE_SYMVER_FUNC(_public_sym, _uniq, "@" _ver_str, _ret, __VA_ARGS__)
+
+#else
+
+ // Static linking, or linker does not support symbol versions
+#define COMPAT_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ static inline __attribute__((unused)) \
+ _ret __##_public_sym##_##_uniq(__VA_ARGS__)
+#define LATEST_SYMVER_FUNC(_public_sym, _uniq, _ver_str, _ret, ...) \
+ static __attribute__((unused)) \
+ _ret __##_public_sym##_##_uniq(__VA_ARGS__) \
+ __attribute__((alias(stringify(_public_sym)))); \
+ extern _ret _public_sym(__VA_ARGS__)
+
+#endif
+
+#endif
diff --git a/src/rc-compat/v37/util/udma_barrier.h b/src/rc-compat/v37/util/udma_barrier.h
new file mode 100644
index 000000000000..5730576e6356
--- /dev/null
+++ b/src/rc-compat/v37/util/udma_barrier.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __UTIL_UDMA_BARRIER_H
+#define __UTIL_UDMA_BARRIER_H
+
+#include <pthread.h>
+
+/* Barriers for DMA.
+
+ These barriers are explicitly only for use with user DMA operations. If you
+ are looking for barriers to use with cache-coherent multi-threaded
+ consistency then look in stdatomic.h. If you need both kinds of synchronicity
+ for the same address then use an atomic operation followed by one
+ of these barriers.
+
+ When reasoning about these barriers there are two objects:
+ - CPU attached address space (the CPU memory could be a range of things:
+ cached/uncached/non-temporal CPU DRAM, uncached MMIO space in another
+ device, pMEM). Generally speaking the ordering is only relative
+ to the local CPU's view of the system. Eg if the local CPU
+ is not guaranteed to see a write from another CPU then it is also
+ OK for the DMA device to also not see the write after the barrier.
+ - A DMA initiator on a bus. For instance a PCI-E device issuing
+ MemRd/MemWr TLPs.
+
+ The ordering guarantee is always stated between those two streams. Eg what
+ happens if a MemRd TLP is sent in via PCI-E relative to a CPU WRITE to the
+ same memory location.
+
+ The providers have a very regular and predictable use of these barriers; to
+ make things very clear, each narrow use is given a name, and the proper name
+ should be used in the provider as a form of documentation.
+*/
+
+/* Ensure that the device's view of memory matches the CPU's view of memory.
+ This should be placed before any MMIO store that could trigger the device
+ to begin doing DMA, such as a device doorbell ring.
+
+ eg
+ *dma_buf = 1;
+ udma_to_device_barrier();
+ mmio_write(DO_DMA_REG, dma_buf);
+ Must ensure that the device sees the '1'.
+
+ This is required to fence writes created by the libibverbs user. Those
+ writes could be to any CPU mapped memory object with any cachability mode.
+
+ NOTE: x86 has historically used a weaker semantic for this barrier, and
+ only fenced normal stores to normal memory. libibverbs users using other
+ memory types or non-temporal stores are required to use SFENCE in their own
+ code prior to calling verbs to start a DMA.
+*/
+#if defined(__i386__)
+#define udma_to_device_barrier() asm volatile("" ::: "memory")
+#elif defined(__x86_64__)
+#define udma_to_device_barrier() asm volatile("" ::: "memory")
+#elif defined(__PPC64__)
+#define udma_to_device_barrier() asm volatile("sync" ::: "memory")
+#elif defined(__PPC__)
+#define udma_to_device_barrier() asm volatile("sync" ::: "memory")
+#elif defined(__ia64__)
+#define udma_to_device_barrier() asm volatile("mf" ::: "memory")
+#elif defined(__sparc_v9__)
+#define udma_to_device_barrier() asm volatile("membar #StoreStore" ::: "memory")
+#elif defined(__aarch64__)
+#define udma_to_device_barrier() asm volatile("dsb st" ::: "memory");
+#elif defined(__sparc__) || defined(__s390x__)
+#define udma_to_device_barrier() asm volatile("" ::: "memory")
+#elif defined(__loongarch__)
+#define udma_to_device_barrier() asm volatile("dbar 0" ::: "memory")
+#else
+#error No architecture specific memory barrier defines found!
+#endif
+
+/* Ensure that all ordered stores from the device are observable from the
+ CPU. This only makes sense after something that observes an ordered store
+ from the device - eg by reading a MMIO register or seeing that CPU memory is
+ updated.
+
+ This guarantees that all reads that follow the barrier see the ordered
+ stores that preceded the observation.
+
+ For instance, this would be used after testing a valid bit in a memory
+ that is a DMA target, to ensure that the following reads see the
+ data written before the MemWr TLP that set the valid bit.
+*/
+#if defined(__i386__)
+#define udma_from_device_barrier() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#elif defined(__x86_64__)
+#define udma_from_device_barrier() asm volatile("lfence" ::: "memory")
+#elif defined(__PPC64__)
+#define udma_from_device_barrier() asm volatile("lwsync" ::: "memory")
+#elif defined(__PPC__)
+#define udma_from_device_barrier() asm volatile("sync" ::: "memory")
+#elif defined(__ia64__)
+#define udma_from_device_barrier() asm volatile("mf" ::: "memory")
+#elif defined(__sparc_v9__)
+#define udma_from_device_barrier() asm volatile("membar #LoadLoad" ::: "memory")
+#elif defined(__aarch64__)
+#define udma_from_device_barrier() asm volatile("dsb ld" ::: "memory");
+#elif defined(__sparc__) || defined(__s390x__)
+#define udma_from_device_barrier() asm volatile("" ::: "memory")
+#elif defined(__loongarch__)
+#define udma_from_device_barrier() asm volatile("dbar 0" ::: "memory")
+#else
+#error No architecture specific memory barrier defines found!
+#endif
+
+/* Order writes to CPU memory so that a DMA device cannot view writes after
+ the barrier without also seeing all writes before the barrier. This does
+ not guarantee any writes are visible to DMA.
+
+ This would be used in cases where a DMA buffer might have a valid bit and
+ data, this barrier is placed after writing the data but before writing the
+ valid bit to ensure the DMA device cannot observe a set valid bit with
+ unwritten data.
+
+ Compared to udma_to_device_barrier() this barrier is not required to fence
+ anything but normal stores to normal malloc memory. Usage should be:
+
+ write_wqe
+ udma_to_device_barrier(); // Get user memory ready for DMA
+ wqe->addr = ...;
+ wqe->flags = ...;
+ udma_ordering_write_barrier(); // Guarantee WQE written in order
+ wqe->valid = 1;
+*/
+#define udma_ordering_write_barrier() udma_to_device_barrier()
+
+/* Promptly flush writes to MMIO Write Combining memory.
+ This should be used after a write to WC memory. This is both a barrier
+ and a hint to the CPU to flush any buffers to reduce latency to TLP
+ generation.
+
+ This is not required to have any effect on CPU memory.
+
+ If done while holding a lock then the ordering of MMIO writes across CPUs
+ must be guaranteed to follow the natural ordering implied by the lock.
+
+ This must also act as a barrier that prevents write combining, eg
+ *wc_mem = 1;
+ mmio_flush_writes();
+ *wc_mem = 2;
+ Must always produce two MemWr TLPs, '1' and '2'. Without the barrier
+ the CPU is allowed to produce a single TLP '2'.
+
+ Note that there is no order guarantee for writes to WC memory without
+ barriers.
+
+ This is intended to be used in conjunction with WC memory to generate large
+ PCI-E MemWr TLPs from the CPU.
+*/
+#if defined(__i386__)
+#define mmio_flush_writes() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#elif defined(__x86_64__)
+#define mmio_flush_writes() asm volatile("sfence" ::: "memory")
+#elif defined(__PPC64__)
+#define mmio_flush_writes() asm volatile("sync" ::: "memory")
+#elif defined(__PPC__)
+#define mmio_flush_writes() asm volatile("sync" ::: "memory")
+#elif defined(__ia64__)
+#define mmio_flush_writes() asm volatile("fwb" ::: "memory")
+#elif defined(__sparc_v9__)
+#define mmio_flush_writes() asm volatile("membar #StoreStore" ::: "memory")
+#elif defined(__aarch64__)
+#define mmio_flush_writes() asm volatile("dsb st" ::: "memory");
+#elif defined(__sparc__) || defined(__s390x__)
+#define mmio_flush_writes() asm volatile("" ::: "memory")
+#elif defined(__loongarch__)
+#define mmio_flush_writes() asm volatile("dbar 0" ::: "memory")
+#else
+#error No architecture specific memory barrier defines found!
+#endif
+
+/* Prevent WC writes from being re-ordered relative to other MMIO
+ writes. This should be used before a write to WC memory.
+
+ This must act as a barrier to prevent write re-ordering from different
+ memory types:
+ *mmio_mem = 1;
+ mmio_flush_writes();
+ *wc_mem = 2;
+ Must always produce a TLP '1' followed by '2'.
+
+ This barrier implies udma_to_device_barrier()
+
+ This is intended to be used in conjunction with WC memory to generate large
+ PCI-E MemWr TLPs from the CPU.
+*/
+#define mmio_wc_start() mmio_flush_writes()
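+
+/* Illustrative sketch (editor's addition, not from upstream rdma-core
+   v37.3): the mixed-memory-type ordering described above, with
+   hypothetical pointers to an ordinary MMIO register and a WC-mapped
+   buffer. */
+static inline void example_mmio_then_wc(volatile unsigned *mmio_mem,
+                                        volatile unsigned *wc_mem)
+{
+        *mmio_mem = 1; /* ordinary (uncached) MMIO write */
+        mmio_wc_start(); /* fence before switching to WC stores */
+        *wc_mem = 2; /* reaches the device strictly after '1' */
+        mmio_flush_writes(); /* flush the WC write promptly */
+}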
+
+/* Keep MMIO writes in order.
+ Currently we lack writel macros that universally guarantee MMIO
+ writes happen in order, like the kernel does. Even worse, many
+ providers haphazardly open-code writes to MMIO memory, omitting even
+ volatile.
+
+ Until this can be fixed with a proper writel macro, this barrier
+ is a stand in to indicate places where MMIO writes should be switched
+ to some future writel.
+*/
+#define mmio_ordered_writes_hack() mmio_flush_writes()
+
+/* Write Combining Spinlock primitive
+
+ Any access to a multi-value WC region must ensure that multiple CPUs do
+ not write to the same values concurrently; these macros make that
+ straightforward and efficient if the chosen exclusion is a spinlock.
+
+ The spinlock guarantees that the WC writes issued within the critical
+ section are made visible as TLPs to the device. The TLPs must be seen by
+ the device strictly in the order that the spinlocks are acquired, and
+ combining WC writes between different sections is not permitted.
+
+ Use of these macros allows the fencing inside the spinlock to be combined
+ with the fencing required for DMA.
+ */
+static inline void mmio_wc_spinlock(pthread_spinlock_t *lock)
+{
+ pthread_spin_lock(lock);
+#if !defined(__i386__) && !defined(__x86_64__)
+ /* For x86 the serialization within the spin lock is enough to
+ * strongly order WC and other memory types. */
+ mmio_wc_start();
+#endif
+}
+
+static inline void mmio_wc_spinunlock(pthread_spinlock_t *lock)
+{
+ /* It is possible that on x86 the atomic in the lock is strong enough
+ * to force-flush the WC buffers quickly, and this SFENCE can be
+ * omitted too. */
+ mmio_flush_writes();
+ pthread_spin_unlock(lock);
+}
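+
+/* Illustrative sketch (editor's addition, not from upstream rdma-core
+   v37.3): copying a descriptor into a WC-mapped buffer under the WC
+   spinlock so that concurrent CPUs' writes reach the device as distinct,
+   ordered TLPs. All names are hypothetical. */
+static inline void example_wc_post(pthread_spinlock_t *lock,
+                                   volatile unsigned long *wc_buf,
+                                   const unsigned long *desc, int n)
+{
+        int i;
+
+        mmio_wc_spinlock(lock); /* acquire lock + WC ordering fence */
+        for (i = 0; i < n; i++)
+                wc_buf[i] = desc[i]; /* WC writes inside the critical section */
+        mmio_wc_spinunlock(lock); /* flush TLPs, then release the lock */
+}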
+
+#endif
diff --git a/src/rc-compat/v37/util/util.h b/src/rc-compat/v37/util/util.h
new file mode 100644
index 000000000000..45f50658519a
--- /dev/null
+++ b/src/rc-compat/v37/util/util.h
@@ -0,0 +1,93 @@
+/* GPLv2 or OpenIB.org BSD (MIT) See COPYING file */
+#ifndef UTIL_UTIL_H
+#define UTIL_UTIL_H
+
+#include <ccan/ilog.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <stdio.h>
+
+/* Return true if the snprintf succeeded, false if there was truncation or
+ * error */
+static inline bool __good_snprintf(size_t len, int rc)
+{
+ return (rc < len && rc >= 0);
+}
+
+#define check_snprintf(buf, len, fmt, ...) \
+ __good_snprintf(len, snprintf(buf, len, fmt, ##__VA_ARGS__))
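+
+/*
+ * Illustrative sketch (editor's addition, not from upstream rdma-core
+ * v37.3): a hypothetical helper that only reports success when the
+ * formatted name fit into the caller's buffer without truncation.
+ */
+static inline bool example_format_devname(char *buf, size_t len,
+                                          unsigned int idx)
+{
+        return check_snprintf(buf, len, "example_dev%u", idx);
+}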
+
+/* a CMP b. See also the BSD macro timercmp(). */
+#define ts_cmp(a, b, CMP) \
+ (((a)->tv_sec == (b)->tv_sec) ? \
+ ((a)->tv_nsec CMP (b)->tv_nsec) : \
+ ((a)->tv_sec CMP (b)->tv_sec))
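+
+/*
+ * Usage note (editor's addition, not from upstream): for two struct
+ * timespec values a and b, ts_cmp(&a, &b, <) is true iff a is strictly
+ * earlier than b; seconds are compared first and nanoseconds break ties,
+ * mirroring the BSD timercmp() macro.
+ */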
+
+#define offsetofend(_type, _member) \
+ (offsetof(_type, _member) + sizeof(((_type *)0)->_member))
+
+#define BITS_PER_LONG (8 * sizeof(long))
+#define BITS_PER_LONG_LONG (8 * sizeof(long long))
+
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
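+
+/*
+ * Worked examples (editor's addition, not from upstream): with 64-bit
+ * longs, GENMASK(7, 4) == 0xf0, GENMASK(31, 0) == 0xffffffff,
+ * BIT(3) == 0x8 and BIT_ULL(63) == 1ULL << 63.
+ */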
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
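+
+/*
+ * Illustrative sketch (editor's addition, not from upstream rdma-core
+ * v37.3): packing and unpacking a hypothetical 4-bit opcode field that
+ * occupies bits 7..4 of a descriptor word. For example,
+ * FIELD_PREP(GENMASK(7, 4), 0x5) yields 0x50 and
+ * FIELD_GET(GENMASK(7, 4), 0x57) yields 0x5.
+ */
+#define EXAMPLE_OPCODE_MASK GENMASK(7, 4)
+
+static inline unsigned long example_set_opcode(unsigned long desc,
+                                               unsigned long opcode)
+{
+        return (desc & ~EXAMPLE_OPCODE_MASK) |
+               FIELD_PREP(EXAMPLE_OPCODE_MASK, opcode);
+}
+
+static inline unsigned long example_get_opcode(unsigned long desc)
+{
+        return FIELD_GET(EXAMPLE_OPCODE_MASK, desc);
+}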
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+static inline unsigned long align_down(unsigned long val, unsigned long _align)
+{
+ return align(val - (_align - 1), _align);
+}
+
+static inline uint64_t roundup_pow_of_two(uint64_t n)
+{
+ return n == 1 ? 1 : 1ULL << ilog64(n - 1);
+}
+
+static inline unsigned long DIV_ROUND_UP(unsigned long n, unsigned long d)
+{
+ return (n + d - 1) / d;
+}
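+
+/*
+ * Worked examples (editor's addition, not from upstream): align(5, 4) == 8,
+ * align_down(5, 4) == 4, roundup_pow_of_two(5) == 8 and
+ * DIV_ROUND_UP(5, 4) == 2. align() and align_down() assume a power-of-two
+ * alignment, whereas DIV_ROUND_UP() accepts any non-zero divisor.
+ */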
+
+int set_fd_nonblock(int fd, bool nonblock);
+
+int open_cdev(const char *devname_hint, dev_t cdev);
+
+unsigned int get_random(void);
+#endif
--
2.29.2