/* ACLE support for AArch64 SVE (function shapes)
Copyright (C) 2018-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "memmodel.h"
#include "insn-codes.h"
#include "optabs.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"
/* In the comments below, _t0 represents the first type suffix and _t1
represents the second. Square brackets enclose characters that are
present in only the full name, not the overloaded name. Governing
predicate arguments and predicate suffixes are not shown, since they
depend on the predication type, which is a separate piece of
information from the shape.
Non-overloaded functions may have additional suffixes beyond the
ones shown, if those suffixes don't affect the types in the type
signature. E.g. the predicate form of svtrn1 has a _b suffix,
but this does not affect the prototype, which is always
"svbool_t(svbool_t, svbool_t)". */
namespace aarch64_sve {
/* Return a representation of "const T *". */
static tree
build_const_pointer (tree t)
{
return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
}
/* If INSTANCE has a governing predicate, add it to the list of argument
types in ARGUMENT_TYPES. RETURN_TYPE is the type returned by the
function. */
static void
apply_predication (const function_instance &instance, tree return_type,
vec<tree> &argument_types)
{
if (instance.pred != PRED_none)
{
argument_types.quick_insert (0, get_svbool_t ());
/* For unary merge operations, the first argument is a vector with
the same type as the result. For unary_convert_narrowt it also
provides the "bottom" half of active elements, and is present
for all types of predication. */
if ((argument_types.length () == 2 && instance.pred == PRED_m)
|| instance.shape == shapes::unary_convert_narrowt)
argument_types.quick_insert (0, return_type);
}
}
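/* For example, for a unary merge operation such as svabs_s32_m (assuming
it is defined with PRED_m predication), the unpredicated signature is
"svint32_t (svint32_t)"; apply_predication first inserts the governing
svbool_t and then the merge input, giving
"svint32_t (svint32_t, svbool_t, svint32_t)", i.e. (inactive, pg, op).  */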
/* Parse and move past an element type in FORMAT and return it as a type
suffix.  The format is:
[01]    - the element type in type suffix 0 or 1 of INSTANCE
f<bits> - a floating-point type with the given number of bits
f[01]   - a floating-point type with the same width as type suffix 0 or 1
B       - bfloat16_t
h<elt>  - a half-sized version of <elt>
p       - a predicate (represented as TYPE_SUFFIX_b)
q<elt>  - a quarter-sized version of <elt>
s<bits> - a signed type with the given number of bits
s[01]   - a signed type with the same width as type suffix 0 or 1
u<bits> - an unsigned type with the given number of bits
u[01]   - an unsigned type with the same width as type suffix 0 or 1
w<elt>  - a 64-bit version of <elt> if <elt> is integral, otherwise <elt>
where <elt> is another element type.  */
static type_suffix_index
parse_element_type (const function_instance &instance, const char *&format)
{
int ch = *format++;
if (ch == 'f' || ch == 's' || ch == 'u')
{
type_class_index tclass = (ch == 'f' ? TYPE_float
: ch == 's' ? TYPE_signed
: TYPE_unsigned);
char *end;
unsigned int bits = strtol (format, &end, 10);
format = end;
if (bits == 0 || bits == 1)
bits = instance.type_suffix (bits).element_bits;
return find_type_suffix (tclass, bits);
}
if (ch == 'w')
{
type_suffix_index suffix = parse_element_type (instance, format);
if (type_suffixes[suffix].integer_p)
return find_type_suffix (type_suffixes[suffix].tclass, 64);
return suffix;
}
if (ch == 'p')
return TYPE_SUFFIX_b;
if (ch == 'B')
return TYPE_SUFFIX_bf16;
if (ch == 'q')
{
type_suffix_index suffix = parse_element_type (instance, format);
return find_type_suffix (type_suffixes[suffix].tclass,
type_suffixes[suffix].element_bits / 4);
}
if (ch == 'h')
{
type_suffix_index suffix = parse_element_type (instance, format);
/* Widening and narrowing doesn't change the type for predicates;
everything's still an svbool_t. */
if (suffix == TYPE_SUFFIX_b)
return suffix;
return find_type_suffix (type_suffixes[suffix].tclass,
type_suffixes[suffix].element_bits / 2);
}
if (ch == '0' || ch == '1')
return instance.type_suffix_ids[ch - '0'];
gcc_unreachable ();
}
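/* For example, if type suffix 0 of INSTANCE is _s32, then "0" resolves
to s32, "h0" to s16, "q0" to s8, "w0" to s64, "u0" to u32 and "f0"
to f32, while "p" always resolves to the svbool_t suffix b.  */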
/* Read and return a type from FORMAT for function INSTANCE.  Advance
FORMAT beyond the type string.  The format is:
_       - void
al      - array pointer for loads
ap      - array pointer for prefetches
as      - array pointer for stores
b       - base vector type (from a _<m0>base suffix)
d       - displacement vector type (from a _<m1>index or _<m1>offset suffix)
e<name> - an enum with the given name
s<elt>  - a scalar type with the given element suffix
t<elt>  - a vector or tuple type with given element suffix [*1]
v<elt>  - a vector with the given element suffix
where <elt> has the format described above parse_element_type
[*1] the vectors_per_tuple function indicates whether the type should
be a tuple, and if so, how many vectors it should contain.  */
static tree
parse_type (const function_instance &instance, const char *&format)
{
int ch = *format++;
if (ch == '_')
return void_type_node;
if (ch == 'a')
{
ch = *format++;
if (ch == 'l')
return build_const_pointer (instance.memory_scalar_type ());
if (ch == 'p')
return const_ptr_type_node;
if (ch == 's')
return build_pointer_type (instance.memory_scalar_type ());
gcc_unreachable ();
}
if (ch == 'b')
return instance.base_vector_type ();
if (ch == 'd')
return instance.displacement_vector_type ();
if (ch == 'e')
{
if (strncmp (format, "pattern", 7) == 0)
{
format += 7;
return acle_svpattern;
}
if (strncmp (format, "prfop", 5) == 0)
{
format += 5;
return acle_svprfop;
}
gcc_unreachable ();
}
if (ch == 's')
{
type_suffix_index suffix = parse_element_type (instance, format);
return scalar_types[type_suffixes[suffix].vector_type];
}
if (ch == 't')
{
type_suffix_index suffix = parse_element_type (instance, format);
vector_type_index vector_type = type_suffixes[suffix].vector_type;
unsigned int num_vectors = instance.vectors_per_tuple ();
return acle_vector_types[num_vectors - 1][vector_type];
}
if (ch == 'v')
{
type_suffix_index suffix = parse_element_type (instance, format);
return acle_vector_types[0][type_suffixes[suffix].vector_type];
}
gcc_unreachable ();
}
/* Read and move past any argument count at FORMAT for the function
signature of INSTANCE. The counts are:
*q: one argument per element in a 128-bit quadword (as for svdupq)
*t: one argument per vector in a tuple (as for svcreate)
Otherwise the count is 1. */
static unsigned int
parse_count (const function_instance &instance, const char *&format)
{
if (format[0] == '*' && format[1] == 'q')
{
format += 2;
return instance.elements_per_vq (0);
}
if (format[0] == '*' && format[1] == 't')
{
format += 2;
return instance.vectors_per_tuple ();
}
return 1;
}
/* Read a type signature for INSTANCE from FORMAT. Add the argument types
to ARGUMENT_TYPES and return the return type.
The format is a comma-separated list of types (as for parse_type),
with the first type being the return type and the rest being the
argument types. Each argument type can be followed by an optional
count (as for parse_count). */
static tree
parse_signature (const function_instance &instance, const char *format,
vec<tree> &argument_types)
{
tree return_type = parse_type (instance, format);
while (format[0] == ',')
{
format += 1;
tree argument_type = parse_type (instance, format);
unsigned int count = parse_count (instance, format);
for (unsigned int i = 0; i < count; ++i)
argument_types.quick_push (argument_type);
}
gcc_assert (format[0] == 0);
return return_type;
}
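/* For example, given an instance whose type suffix 0 is _s32, the
signature "v0,v0,su64" gives a return type of svint32_t and argument
types {svint32_t, uint64_t}; apply_predication then adds the governing
predicate (and any merge input) where appropriate.  */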
/* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
the type suffixes at index TI and the predication suffix at index PI.
The other arguments are as for build_all. */
static void
build_one (function_builder &b, const char *signature,
const function_group_info &group, mode_suffix_index mode_suffix_id,
unsigned int ti, unsigned int pi, bool force_direct_overloads)
{
/* Byte forms of svdupq take 16 arguments. */
auto_vec<tree, 16> argument_types;
function_instance instance (group.base_name, *group.base, *group.shape,
mode_suffix_id, group.types[ti],
group.preds[pi]);
tree return_type = parse_signature (instance, signature, argument_types);
apply_predication (instance, return_type, argument_types);
b.add_unique_function (instance, return_type, argument_types,
group.required_extensions, force_direct_overloads);
}
/* GROUP describes some sort of gather or scatter operation. There are
two cases:
- If the function has any type suffixes (as for loads and stores), the
first function type suffix specifies either a 32-bit or a 64-bit type,
which in turn selects either MODE32 or MODE64 as the addressing mode.
Add a function instance for every type and predicate combination
in GROUP for which the associated addressing mode is not MODE_none.
- If the function has no type suffixes (as for prefetches), add one
MODE32 form and one MODE64 form for each predication type.
The other arguments are as for build_all. */
static void
build_32_64 (function_builder &b, const char *signature,
const function_group_info &group, mode_suffix_index mode32,
mode_suffix_index mode64, bool force_direct_overloads = false)
{
for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
if (group.types[0][0] == NUM_TYPE_SUFFIXES)
{
gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
build_one (b, signature, group, mode32, 0, pi,
force_direct_overloads);
build_one (b, signature, group, mode64, 0, pi,
force_direct_overloads);
}
else
for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
{
unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
gcc_assert (bits == 32 || bits == 64);
mode_suffix_index mode = bits == 32 ? mode32 : mode64;
if (mode != MODE_none)
build_one (b, signature, group, mode, ti, pi,
force_direct_overloads);
}
}
/* For every type and predicate combination in GROUP, add one function
that takes a scalar (pointer) base and a signed vector array index,
and another that instead takes an unsigned vector array index.
The vector array index has the same element size as the first
function type suffix. SIGNATURE is as for build_all. */
static void
build_sv_index (function_builder &b, const char *signature,
const function_group_info &group)
{
build_32_64 (b, signature, group, MODE_s32index, MODE_s64index);
build_32_64 (b, signature, group, MODE_u32index, MODE_u64index);
}
/* Like build_sv_index, but only handle 64-bit types. */
static void
build_sv_index64 (function_builder &b, const char *signature,
const function_group_info &group)
{
build_32_64 (b, signature, group, MODE_none, MODE_s64index);
build_32_64 (b, signature, group, MODE_none, MODE_u64index);
}
/* Like build_sv_index, but taking vector byte offsets instead of vector
array indices. */
static void
build_sv_offset (function_builder &b, const char *signature,
const function_group_info &group)
{
build_32_64 (b, signature, group, MODE_s32offset, MODE_s64offset);
build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}
/* Like build_sv_offset, but exclude offsets that must be interpreted
as signed (i.e. s32offset). */
static void
build_sv_uint_offset (function_builder &b, const char *signature,
const function_group_info &group)
{
build_32_64 (b, signature, group, MODE_none, MODE_s64offset);
build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}
/* For every type and predicate combination in GROUP, add a function
that takes a vector base address and no displacement. The vector
base has the same element size as the first type suffix.
The other arguments are as for build_all. */
static void
build_v_base (function_builder &b, const char *signature,
const function_group_info &group,
bool force_direct_overloads = false)
{
build_32_64 (b, signature, group, MODE_u32base, MODE_u64base,
force_direct_overloads);
}
/* Like build_v_base, but for functions that also take a scalar array
index. */
static void
build_vs_index (function_builder &b, const char *signature,
const function_group_info &group,
bool force_direct_overloads = false)
{
build_32_64 (b, signature, group, MODE_u32base_index, MODE_u64base_index,
force_direct_overloads);
}
/* Like build_v_base, but for functions that also take a scalar byte
offset. */
static void
build_vs_offset (function_builder &b, const char *signature,
const function_group_info &group,
bool force_direct_overloads = false)
{
build_32_64 (b, signature, group, MODE_u32base_offset, MODE_u64base_offset,
force_direct_overloads);
}
/* Add a function instance for every type and predicate combination
in GROUP. Take the function base name from GROUP and the mode suffix
from MODE_SUFFIX_ID. Use SIGNATURE to construct the function signature
without a governing predicate, then use apply_predication to add in the
predicate. FORCE_DIRECT_OVERLOADS is true if there is a one-to-one
mapping between "short" and "full" names, and if standard overload
resolution therefore isn't necessary. */
static void
build_all (function_builder &b, const char *signature,
const function_group_info &group, mode_suffix_index mode_suffix_id,
bool force_direct_overloads = false)
{
for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
for (unsigned int ti = 0;
ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
build_one (b, signature, group, mode_suffix_id, ti, pi,
force_direct_overloads);
}
/* TYPE is the largest type suffix associated with the arguments of R,
but the result is twice as wide. Return the associated type suffix
if it exists, otherwise report an appropriate error and return
NUM_TYPE_SUFFIXES. */
static type_suffix_index
long_type_suffix (function_resolver &r, type_suffix_index type)
{
unsigned int element_bits = type_suffixes[type].element_bits;
if (type_suffixes[type].integer_p && element_bits < 64)
return find_type_suffix (type_suffixes[type].tclass, element_bits * 2);
r.report_no_such_form (type);
return NUM_TYPE_SUFFIXES;
}
/* Declare the function shape NAME, pointing it to an instance
of class <NAME>_def.  */
#define SHAPE(NAME) \
static CONSTEXPR const NAME##_def NAME##_obj; \
namespace shapes { const function_shape *const NAME = &NAME##_obj; }
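/* For example, SHAPE (binary) expands to:
static CONSTEXPR const binary_def binary_obj;
namespace shapes { const function_shape *const binary = &binary_obj; }
so each shape is a singleton object that is exposed only through a
pointer in the shapes namespace.  */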
/* Base class for functions that are not overloaded. */
struct nonoverloaded_base : public function_shape
{
bool
explicit_type_suffix_p (unsigned int) const OVERRIDE
{
return true;
}
tree
resolve (function_resolver &) const OVERRIDE
{
gcc_unreachable ();
}
};
/* Base class for overloaded functions. Bit N of EXPLICIT_MASK is true
if type suffix N appears in the overloaded name. */
template<unsigned int EXPLICIT_MASK>
struct overloaded_base : public function_shape
{
bool
explicit_type_suffix_p (unsigned int i) const OVERRIDE
{
return (EXPLICIT_MASK >> i) & 1;
}
};
/* Base class for adr_index and adr_offset. */
struct adr_base : public overloaded_base<0>
{
/* The function takes two arguments: a vector base and a vector displacement
(either an index or an offset). Resolve based on them both. */
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
mode_suffix_index mode;
if (!r.check_gp_argument (2, i, nargs)
|| (mode = r.resolve_adr_address (0)) == MODE_none)
return error_mark_node;
return r.resolve_to (mode);
};
};
/* Base class for narrowing bottom binary functions that take an
immediate second operand.  The result is half the size of the input
and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowb_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
|| CLASS == TYPE_unsigned);
if (CLASS == TYPE_unsigned)
build_all (b, "vhu0,v0,su64", group, MODE_n);
else
build_all (b, "vh0,v0,su64", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (1, 1);
}
};
/* The top equivalent of binary_imm_narrowb_base. It takes three arguments,
with the first being the values of the even elements, which are typically
the result of the narrowb operation. */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowt_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
|| CLASS == TYPE_unsigned);
if (CLASS == TYPE_unsigned)
build_all (b, "vhu0,vhu0,v0,su64", group, MODE_n);
else
build_all (b, "vh0,vh0,v0,su64", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE)
|| !r.require_integer_immediate (i + 2))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
/* Base class for long (i.e. narrow op narrow -> wide) binary functions
that take an immediate second operand. The type suffix specifies
the wider type. */
struct binary_imm_long_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
build_all (b, "v0,vh0,su64", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type, result_type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_integer_immediate (i + 1)
|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
return res;
return r.report_no_such_form (type);
}
};
/* Base class for inc_dec and inc_dec_pat. */
struct inc_dec_base : public overloaded_base<0>
{
CONSTEXPR inc_dec_base (bool pat_p) : m_pat_p (pat_p) {}
/* Resolve based on the first argument only, which must be either a
scalar or a vector. If it's a scalar, it must be a 32-bit or
64-bit integer. */
tree
resolve (function_resolver &r) const
{
unsigned int i, nargs;
if (!r.check_gp_argument (m_pat_p ? 3 : 2, i, nargs)
|| !r.require_vector_or_scalar_type (i))
return error_mark_node;
mode_suffix_index mode;
type_suffix_index type;
if (r.scalar_argument_p (i))
{
mode = MODE_n;
type = r.infer_integer_scalar_type (i);
}
else
{
mode = MODE_none;
type = r.infer_vector_type (i);
}
if (type == NUM_TYPE_SUFFIXES)
return error_mark_node;
for (++i; i < nargs; ++i)
if (!r.require_integer_immediate (i))
return error_mark_node;
return r.resolve_to (mode, type);
}
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_range (m_pat_p ? 2 : 1, 1, 16);
}
bool m_pat_p;
};
/* Base class for load and load_replicate. */
struct load_contiguous_base : public overloaded_base<0>
{
/* Resolve a call based purely on a pointer argument. The other arguments
are a governing predicate and (for MODE_vnum) a vnum offset. */
tree
resolve (function_resolver &r) const OVERRIDE
{
bool vnum_p = r.mode_suffix_id == MODE_vnum;
gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (vnum_p ? 2 : 1, i, nargs)
|| (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
|| (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
/* Base class for gather loads that take a scalar base and a vector
displacement (either an offset or an index). */
struct load_gather_sv_base : public overloaded_base<0>
{
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
mode_suffix_index mode;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_pointer_type (i, true)) == NUM_TYPE_SUFFIXES
|| (mode = r.resolve_sv_displacement (i + 1, type, true),
mode == MODE_none))
return error_mark_node;
return r.resolve_to (mode, type);
}
};
/* Base class for load_ext_gather_index and load_ext_gather_offset,
which differ only in the units of the displacement. */
struct load_ext_gather_base : public overloaded_base<1>
{
/* Resolve a gather load that takes one of:
- a scalar pointer base and a vector displacement
- a vector base with no displacement or
- a vector base and a scalar displacement
The function has an explicit type suffix that determines the type
of the loaded data. */
tree
resolve (function_resolver &r) const OVERRIDE
{
/* No resolution is needed for a vector base with no displacement;
there's a one-to-one mapping between short and long names. */
gcc_assert (r.displacement_units () != UNITS_none);
type_suffix_index type = r.type_suffix_ids[0];
unsigned int i, nargs;
mode_suffix_index mode;
if (!r.check_gp_argument (2, i, nargs)
|| (mode = r.resolve_gather_address (i, type, true)) == MODE_none)
return error_mark_node;
return r.resolve_to (mode, type);
}
};
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t,
sv<t0:quarter>_t)  (for integer t0)
sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)  (for floating-point t0)
The functions act like the equivalent of "ternary_qq" for integer elements
and normal vector-only ternary functions for floating-point elements. */
struct mmla_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
/* svmmla is distributed over several extensions. Allow the common
denominator to define the overloaded svmmla function without
defining any specific versions. */
if (group.types[0][0] != NUM_TYPE_SUFFIXES)
{
if (type_suffixes[group.types[0][0]].float_p)
build_all (b, "v0,v0,v0,v0", group, MODE_none);
else
build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
}
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
/* Make sure that the function exists now, since not all forms
follow a set pattern after this point. */
tree res = r.resolve_to (r.mode_suffix_id, type);
if (res == error_mark_node)
return res;
bool float_p = type_suffixes[type].float_p;
unsigned int modifier = float_p ? r.SAME_SIZE : r.QUARTER_SIZE;
if (!r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
modifier)
|| !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
modifier))
return error_mark_node;
return res;
}
};
SHAPE (mmla)
/* Base class for prefetch_gather_index and prefetch_gather_offset,
which differ only in the units of the displacement. */
struct prefetch_gather_base : public overloaded_base<0>
{
/* Resolve a gather prefetch that takes one of:
- a scalar pointer base (const void *) and a vector displacement
- a vector base with no displacement or
- a vector base and a scalar displacement
The prefetch operation is the final argument. This is purely a
mode-based resolution; there are no type suffixes. */
tree
resolve (function_resolver &r) const OVERRIDE
{
bool has_displacement_p = r.displacement_units () != UNITS_none;
unsigned int i, nargs;
mode_suffix_index mode;
if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
|| (mode = r.resolve_gather_address (i, NUM_TYPE_SUFFIXES,
false)) == MODE_none
|| !r.require_integer_immediate (nargs - 1))
return error_mark_node;
return r.resolve_to (mode);
}
};
/* Wraps BASE to provide a narrowing shift right function. Argument N
is an immediate shift amount in the range [1, sizeof (<t0>_t) * 4].  */
template<typename BASE, unsigned int N>
struct shift_right_imm_narrow_wrapper : public BASE
{
bool
check (function_checker &c) const OVERRIDE
{
unsigned int bits = c.type_suffix (0).element_bits / 2;
return c.require_immediate_range (N, 1, bits);
}
};
/* Base class for store_scatter_index and store_scatter_offset,
which differ only in the units of the displacement. */
struct store_scatter_base : public overloaded_base<0>
{
/* Resolve a scatter store that takes one of:
- a scalar pointer base and a vector displacement
- a vector base with no displacement or
- a vector base and a scalar displacement
The stored data is the final argument, and it determines the
type suffix. */
tree
resolve (function_resolver &r) const OVERRIDE
{
bool has_displacement_p = r.displacement_units () != UNITS_none;
unsigned int i, nargs;
mode_suffix_index mode;
type_suffix_index type;
if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
|| (type = r.infer_sd_vector_type (nargs - 1)) == NUM_TYPE_SUFFIXES
|| (mode = r.resolve_gather_address (i, type, false)) == MODE_none)
return error_mark_node;
return r.resolve_to (mode, type);
}
};
/* Base class for ternary operations in which the final argument is an
immediate shift amount. The derived class should check the range. */
struct ternary_shift_imm_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
build_all (b, "v0,v0,v0,su64", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2, 1);
}
};
/* Base class for ternary operations in which the first argument has the
same element type as the result, and in which the second and third
arguments have an element type that is derived from the first.
MODIFIER is the number of element bits in the second and third
arguments, or a function_resolver modifier that says how this
precision is derived from the first argument's elements.
TYPE_CLASS2 and TYPE_CLASS3 are the type classes of the second and
third arguments, or function_resolver::SAME_TYPE_CLASS if the type
class is the same as the first argument.  */
template<unsigned int MODIFIER,
type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_opt_n_base : public overloaded_base<0>
{
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
MODIFIER))
return error_mark_node;
return r.finish_opt_n_resolution (i + 2, i, type, TYPE_CLASS3, MODIFIER);
}
};
/* Like ternary_resize2_opt_n_base, but for functions that don't take
a final scalar argument. */
template<unsigned int MODIFIER,
type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_base : public overloaded_base<0>
{
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
MODIFIER)
|| !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
MODIFIER))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
/* Like ternary_resize2_opt_n_base, but for functions that take a final
lane argument. */
template<unsigned int MODIFIER,
type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_lane_base : public overloaded_base<0>
{
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (4, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
MODIFIER)
|| !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
MODIFIER)
|| !r.require_integer_immediate (i + 3))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
/* A specialization of ternary_resize2_lane_base for bfloat16 elements,
indexed in groups of N elements. */
template<unsigned int N>
struct ternary_bfloat_lane_base
: public ternary_resize2_lane_base<16, TYPE_bfloat, TYPE_bfloat>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vB,vB,su64", group, MODE_none);
}
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_lane_index (3, N);
}
};
/* A specialization of ternary_resize2_lane_base for quarter-sized
elements. */
template<type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_qq_lane_base
: public ternary_resize2_lane_base<function_resolver::QUARTER_SIZE,
TYPE_CLASS2, TYPE_CLASS3>
{
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_lane_index (3, 4);
}
};
/* Base class for narrowing bottom unary functions.  The result is half
the size of the input and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowb_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
|| CLASS == TYPE_unsigned);
if (CLASS == TYPE_unsigned)
build_all (b, "vhu0,v0", group, MODE_none);
else
build_all (b, "vh0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_unary (CLASS, r.HALF_SIZE);
}
};
/* The top equivalent of unary_narrowb_base.  All forms take the values
of the even elements as an extra argument, before any governing predicate.
These even elements are typically the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowt_base : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
|| CLASS == TYPE_unsigned);
if (CLASS == TYPE_unsigned)
build_all (b, "vhu0,vhu0,v0", group, MODE_none);
else
build_all (b, "vh0,vh0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
/* sv<m0>_t svfoo[_m0base]_[m1]index(sv<m0>_t, sv<m1>_t)
for all valid combinations of vector base type <m0> and vector
displacement type <m1>.  */
struct adr_index_def : public adr_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_index);
build_all (b, "b,b,d", group, MODE_u32base_s32index);
build_all (b, "b,b,d", group, MODE_u32base_u32index);
build_all (b, "b,b,d", group, MODE_u64base_s64index);
build_all (b, "b,b,d", group, MODE_u64base_u64index);
}
};
SHAPE (adr_index)
/* sv<m0>_t svfoo[_m0base]_[m1]offset(sv<m0>_t, sv<m1>_t).
for all valid combinations of vector base type <m0> and vector
displacement type <m1>.  */
struct adr_offset_def : public adr_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_offset);
build_all (b, "b,b,d", group, MODE_u32base_s32offset);
build_all (b, "b,b,d", group, MODE_u32base_u32offset);
build_all (b, "b,b,d", group, MODE_u64base_s64offset);
build_all (b, "b,b,d", group, MODE_u64base_u64offset);
}
};
SHAPE (adr_offset)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
i.e. a binary operation with uniform types, but with no scalar form. */
struct binary_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2);
}
};
SHAPE (binary)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int>_t)
sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:int>_t).
i.e. a version of the standard binary shape binary_opt_n in which
the final argument is always a signed integer. */
struct binary_int_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vs0", group, MODE_none);
build_all (b, "v0,v0,ss0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, TYPE_signed);
}
};
SHAPE (binary_int_opt_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
where the final argument is an integer constant expression in the
range [0, 16 / sizeof (<t0>_t) - 1].  */
struct binary_lane_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2, 1);
}
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_lane_index (2);
}
};
SHAPE (binary_lane)
/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t, uint64_t).
where the final argument is an integer constant expression in the
range [0, 32 / sizeof (<t0>_t) - 1].  */
struct binary_long_lane_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,vh0,vh0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type, result_type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_matching_vector_type (i + 1, type)
|| !r.require_integer_immediate (i + 2)
|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
return res;
return r.report_no_such_form (type);
}
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_lane_index (2);
}
};
SHAPE (binary_long_lane)
/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t)
sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, <t0:half>_t).  */
struct binary_long_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,vh0,vh0", group, MODE_none);
build_all (b, "v0,vh0,sh0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type, result_type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
r.SAME_SIZE, result_type);
}
};
SHAPE (binary_long_opt_n)
/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t).
i.e. a binary operation in which the final argument is always a scalar
rather than a vector. */
struct binary_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
build_all (b, "v0,v0,s0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_scalar_type (i + 1, r.SAME_TYPE_CLASS))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (binary_n)
/* sv<t0:half>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
i.e. a version of binary_opt_n in which the output elements are half the
width of the input elements. */
struct binary_narrowb_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vh0,v0,v0", group, MODE_none);
build_all (b, "vh0,v0,s0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform_opt_n (2);
}
};
SHAPE (binary_narrowb_opt_n)
/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t, sv<t0>_t)
sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, <t0>_t)
This is the "top" counterpart to binary_narrowb_opt_n. */
struct binary_narrowt_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vh0,vh0,v0,v0", group, MODE_none);
build_all (b, "vh0,vh0,v0,s0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i, i + 1, type, r.SAME_TYPE_CLASS,
r.HALF_SIZE))
return error_mark_node;
return r.finish_opt_n_resolution (i + 2, i + 1, type);
}
};
SHAPE (binary_narrowt_opt_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
i.e. the standard shape for binary operations that operate on
uniform types. */
struct binary_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0", group, MODE_none);
/* _b functions do not have an _n form, but are classified as
binary_opt_n so that they can be overloaded with vector
functions. */
if (group.types[0][0] == TYPE_SUFFIX_b)
gcc_assert (group.types[0][1] == NUM_TYPE_SUFFIXES);
else
build_all (b, "v0,v0,s0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform_opt_n (2);
}
};
SHAPE (binary_opt_n)
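/* For example, assuming svadd is defined with this shape: given
svint32_t x, y, the call svadd_m (pg, x, y) resolves to svadd_s32_m
and svadd_m (pg, x, 1) resolves to svadd_n_s32_m, with _s32 being
inferred from the first data argument in both cases.  */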
/* svbool_t svfoo(svbool_t, svbool_t). */
struct binary_pred_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "v0,v0,v0", group, MODE_none);
}
};
SHAPE (binary_pred)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
where the final argument must be 90 or 270. */
struct binary_rotate_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2, 1);
}
bool
check (function_checker &c) const OVERRIDE
{
return c.require_immediate_either_or (2, 90, 270);
}
};
SHAPE (binary_rotate)
/* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t)
i.e. a binary function that takes two scalars and returns a vector.
An explicit type suffix is required. */
struct binary_scalar_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "v0,s0,s0", group, MODE_none);
}
};
SHAPE (binary_scalar)
/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t).
i.e. a version of "binary" that returns unsigned integers. */
struct binary_to_uint_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vu0,v0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2);
}
};
SHAPE (binary_to_uint)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
i.e. a version of "binary" in which the final argument is always an
unsigned integer. */
struct binary_uint_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vu0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (binary_uint)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0:uint>_t)
i.e. a version of binary_n in which the final argument is always an
unsigned integer. */
struct binary_uint_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,su0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_scalar_type (i + 1, TYPE_unsigned))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (binary_uint_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:uint>_t)
i.e. a version of the standard binary shape binary_opt_n in which
the final argument is always an unsigned integer. */
struct binary_uint_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vu0", group, MODE_none);
build_all (b, "v0,v0,su0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned);
}
};
SHAPE (binary_uint_opt_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t).
i.e. a version of binary_n in which the final argument is always
a 64-bit unsigned integer. */
struct binary_uint64_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_scalar_type (i + 1, "uint64_t"))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (binary_uint64_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, svuint64_t)
sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
i.e. a version of the standard binary shape binary_opt_n in which
the final argument is always a uint64_t. */
struct binary_uint64_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vu64", group, MODE_none);
build_all (b, "v0,v0,su64", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned, 64);
}
};
SHAPE (binary_uint64_opt_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t).  */
struct binary_wide_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vh0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
r.HALF_SIZE))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (binary_wide)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t)
sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:half>_t).  */
struct binary_wide_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vh0", group, MODE_none);
build_all (b, "v0,v0,sh0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
r.HALF_SIZE);
}
};
SHAPE (binary_wide_opt_n)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
<t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t).  */
struct clast_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0", group, MODE_none);
build_all (b, "s0,s0,v0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
if (!r.check_gp_argument (2, i, nargs)
|| !r.require_vector_or_scalar_type (i))
return error_mark_node;
if (r.scalar_argument_p (i))
{
type_suffix_index type;
if (!r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.resolve_to (MODE_n, type);
}
else
{
type_suffix_index type;
if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_matching_vector_type (i + 1, type))
return error_mark_node;
return r.resolve_to (MODE_none, type);
}
}
};
SHAPE (clast)
/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t).  */
struct compare_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vp,v0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2);
}
};
SHAPE (compare)
/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
svbool_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
i.e. a comparison between two vectors, or between a vector and a scalar. */
struct compare_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vp,v0,v0", group, MODE_none);
build_all (b, "vp,v0,s0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform_opt_n (2);
}
};
SHAPE (compare_opt_n)
/* svbool_t svfoo[_t0](const <t0>_t *, const <t0>_t *).  */
struct compare_ptr_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vp,al,al", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_matching_pointer_type (i + 1, i, type))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (compare_ptr)
/* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)
where _t0 is a _b<bits> suffix that describes the predicate result.
There is no direct relationship between the element sizes of _t0
and _t1. */
struct compare_scalar_def : public overloaded_base<1>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vp,s1,s1", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_matching_integer_scalar_type (i + 1, i, type))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
}
};
SHAPE (compare_scalar)
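/* For example, assuming svwhilelt is defined with this shape: the call
svwhilelt_b32 (x, y) with int64_t arguments resolves to
svwhilelt_b32_s64; _b32 is the explicit result suffix and _s64 is
inferred from the scalar arguments.  */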
/* svbool_t svfoo[_t0](sv<t0>_t, svint64_t)  (for signed t0)
svbool_t svfoo[_n_t0](sv<t0>_t, int64_t)  (for signed t0)
svbool_t svfoo[_t0](sv<t0>_t, svuint64_t)  (for unsigned t0)
svbool_t svfoo[_n_t0](sv<t0>_t, uint64_t)  (for unsigned t0)
i.e. a comparison in which the second argument is 64 bits. */
struct compare_wide_opt_n_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "vp,v0,vw0", group, MODE_none);
build_all (b, "vp,v0,sw0", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS, 64);
}
};
SHAPE (compare_wide_opt_n)
/* uint64_t svfoo(). */
struct count_inherent_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "su64", group, MODE_none);
}
};
SHAPE (count_inherent)
/* uint64_t svfoo(enum svpattern). */
struct count_pat_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "su64,epattern", group, MODE_none);
}
};
SHAPE (count_pat)
/* uint64_t svfoo(svbool_t). */
struct count_pred_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "su64,vp", group, MODE_none);
}
};
SHAPE (count_pred)
/* uint64_t svfoo[_t0](sv<t0>_t).  */
struct count_vector_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "su64,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (1);
}
};
SHAPE (count_vector)
/* sv<t0>xN_t svfoo[_t0](sv<t0>_t, ..., sv<t0>_t)
where there are N arguments in total. */
struct create_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "t0,v0*t", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (r.vectors_per_tuple ());
}
};
SHAPE (create)
/* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)
where there are enough arguments to fill 128 bits of data (or to
control 128 bits of data in the case of predicates). */
struct dupq_def : public overloaded_base<1>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
/* The "_n" suffix is optional; the full name has it, but the short
name doesn't. */
build_all (b, "v0,s0*q", group, MODE_n, true);
}
tree
resolve (function_resolver &) const OVERRIDE
{
/* The short forms just make "_n" implicit, so no resolution is needed. */
gcc_unreachable ();
}
};
SHAPE (dupq)
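/* For example, for _s8 the "*q" count above is elements_per_vq (0) == 16,
so the full name svdupq_n_s8 takes sixteen int8_t arguments; this is
the case referred to by the "Byte forms of svdupq take 16 arguments"
comment in build_one.  */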
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
where the final argument is an integer constant expression that when
multiplied by the number of bytes in t0 is in the range [0, 255]. */
struct ext_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,v0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
return r.resolve_uniform (2, 1);
}
bool
check (function_checker &c) const OVERRIDE
{
unsigned int bytes = c.type_suffix (0).element_bytes;
return c.require_immediate_range (2, 0, 256 / bytes - 1);
}
};
SHAPE (ext)
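/* For example, assuming svext is defined with this shape: for _s32,
element_bytes is 4, so the final argument must be a constant in the
range [0, 256 / 4 - 1], i.e. [0, 63].  */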
/* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t).  */
struct fold_left_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "s0,s0,v0", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| !r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (fold_left)
/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, uint64_t)
where the final argument is an integer constant expression in
the range [0, N - 1]. */
struct get_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,t0,su64", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_integer_immediate (i + 1))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
bool
check (function_checker &c) const OVERRIDE
{
unsigned int nvectors = c.vectors_per_tuple ();
return c.require_immediate_range (1, 0, nvectors - 1);
}
};
SHAPE (get)
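/* For example, assuming svget2 and svget4 are defined with this shape:
svget2 forms have vectors_per_tuple () == 2, so the final argument
must be in the range [0, 1], while svget4 forms allow [0, 3].  */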
/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
<t0>_t svfoo[_n_t0](<t0>_t, uint64_t)
where the t0 in the vector form is a signed or unsigned integer
whose size is tied to the [bhwd] suffix of "svfoo". */
struct inc_dec_def : public inc_dec_base
{
CONSTEXPR inc_dec_def () : inc_dec_base (false) {}
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
/* These functions are unusual in that the type suffixes for
the scalar and vector forms are not related. The vector
form always has exactly two potential suffixes while the
scalar form always has four. */
if (group.types[2][0] == NUM_TYPE_SUFFIXES)
build_all (b, "v0,v0,su64", group, MODE_none);
else
build_all (b, "s0,s0,su64", group, MODE_n);
}
};
SHAPE (inc_dec)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, enum svpattern, uint64_t)
<t0>_t svfoo[_n_t0](<t0>_t, enum svpattern, uint64_t)
where the t0 in the vector form is a signed or unsigned integer
whose size is tied to the [bhwd] suffix of "svfoo". */
struct inc_dec_pat_def : public inc_dec_base
{
CONSTEXPR inc_dec_pat_def () : inc_dec_base (true) {}
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
/* These functions are unusual in that the type suffixes for
the scalar and vector forms are not related. The vector
form always has exactly two potential suffixes while the
scalar form always has four. */
if (group.types[2][0] == NUM_TYPE_SUFFIXES)
build_all (b, "v0,v0,epattern,su64", group, MODE_none);
else
build_all (b, "s0,s0,epattern,su64", group, MODE_n);
}
};
SHAPE (inc_dec_pat)
/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbool_t).  */
struct inc_dec_pred_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
build_all (b, "v0,v0,vp", group, MODE_none);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
}
};
SHAPE (inc_dec_pred)
/* <t0>_t svfoo[_n_t0]_t1(<t0>_t, svbool_t)
where _t1 is a _b<bits> suffix that describes the svbool_t argument.  */
struct inc_dec_pred_scalar_def : public overloaded_base<2>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_n);
build_all (b, "s0,s0,vp", group, MODE_n);
}
tree
resolve (function_resolver &r) const OVERRIDE
{
unsigned int i, nargs;
type_suffix_index type;
if (!r.check_gp_argument (2, i, nargs)
|| (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
|| !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type, r.type_suffix_ids[1]);
}
};
SHAPE (inc_dec_pred_scalar)
/* sv<t0>[xN]_t svfoo_t0().  */
struct inherent_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "t0", group, MODE_none);
}
};
SHAPE (inherent)
/* svbool_t svfoo[_b](). */
struct inherent_b_def : public overloaded_base<0>
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
/* The "_b" suffix is optional; the full name has it, but the short
name doesn't. */
build_all (b, "v0", group, MODE_none, true);
}
tree
resolve (function_resolver &) const OVERRIDE
{
/* The short forms just make "_b" implicit, so no resolution is needed. */
gcc_unreachable ();
}
};
SHAPE (inherent_b)
/* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t).  */
struct load_def : public load_contiguous_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_none);
b.add_overloaded_functions (group, MODE_vnum);
build_all (b, "t0,al", group, MODE_none);
build_all (b, "t0,al,ss64", group, MODE_vnum);
}
};
SHAPE (load)
/* sv<t0>_t svfoo_t0(const <X>_t *)
sv<t0>_t svfoo_vnum_t0(const <X>_t *, int64_t)
where <X> is determined by the function base name.  */
struct load_ext_def : public nonoverloaded_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
build_all (b, "t0,al", group, MODE_none);
build_all (b, "t0,al,ss64", group, MODE_vnum);
}
};
SHAPE (load_ext)
/* sv<t0>_t svfoo_[s32]index_t0(const <X>_t *, svint32_t)
sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
sv<t0>_t svfoo_[u32]index_t0(const <X>_t *, svuint32_t)
sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
where <X> is determined by the function base name.  */
struct load_ext_gather_index_def : public load_ext_gather_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_index);
build_sv_index (b, "t0,al,d", group);
build_vs_index (b, "t0,b,ss64", group);
}
};
SHAPE (load_ext_gather_index)
/* sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
where <X> is determined by the function base name.  This is
load_ext_gather_index that doesn't support 32-bit vector indices.  */
struct load_ext_gather_index_restricted_def : public load_ext_gather_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_index);
build_sv_index64 (b, "t0,al,d", group);
build_vs_index (b, "t0,b,ss64", group);
}
};
SHAPE (load_ext_gather_index_restricted)
/* sv<t0>_t svfoo_[s32]offset_t0(const <X>_t *, svint32_t)
sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
where <X> is determined by the function base name.  */
struct load_ext_gather_offset_def : public load_ext_gather_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_offset);
build_sv_offset (b, "t0,al,d", group);
build_v_base (b, "t0,b", group, true);
build_vs_offset (b, "t0,b,ss64", group);
}
};
SHAPE (load_ext_gather_offset)
/* sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
where <X> is determined by the function base name.  This is
load_ext_gather_offset without the s32 vector offset form.  */
struct load_ext_gather_offset_restricted_def : public load_ext_gather_base
{
void
build (function_builder &b, const function_group_info &group) const OVERRIDE
{
b.add_overloaded_functions (group, MODE_offset);
build_sv_uint_offset (b, "t0,al,d", group);
build_v_base (b, "t0,b", group, true);
build_vs_offset (b, "t0,b,ss64", group);
}
};
SHAPE (load_ext_gather_offset_restricted)
/* sv<t0>_t svfoo_[s32]index[_t0](const <t0>_t *, svint32_t)
sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)
sv<t0>_t svfoo_[s32]offset[_t0](const <t0>_t *, svint32_t)
sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
sv_t svfoo_[u32]offset[_t0](const