openmp: Parsing and some semantic analysis of OpenMP allocate clause

This patch adds parsing and some semantic analysis of the OpenMP allocate
clause, but still ignores it during OpenMP lowering, where privatized
variables listed in an allocate clause should eventually be allocated with
the corresponding allocators rather than on the stack.
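
For reference, a minimal sketch of the syntax that now parses, modeled on the
new c-c++-common/gomp/allocate-1.c test added by this patch (compile with
-fopenmp; the allocator argument is accepted and checked, but not yet honored
during lowering):

/* Mirrors the omp_allocator_handle_t typedef used by the new testcases.  */
typedef enum omp_allocator_handle_t
{
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_low_lat_mem_alloc = 5,
  __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
} omp_allocator_handle_t;

void
foo (int x, int y)
{
  /* allocate (variable-list) and allocate (allocator : variable-list);
     each listed variable must also appear in an explicit privatization
     clause on the same construct.  */
#pragma omp parallel firstprivate (x, y) allocate (x) \
		     allocate (omp_default_mem_alloc : y)
  x += y;
  /* The allocator is currently ignored during lowering, so the private
     copy of x is still placed on the stack.  */
#pragma omp task private (x) allocate (omp_low_lat_mem_alloc : x)
  x = 0;
}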

2020-10-28  Jakub Jelinek  <jakub@redhat.com>

gcc/
	* tree-core.h (enum omp_clause_code): Add OMP_CLAUSE_ALLOCATE.
	* tree.h (OMP_CLAUSE_ALLOCATE_ALLOCATOR,
	OMP_CLAUSE_ALLOCATE_COMBINED): Define.
	* tree.c (omp_clause_num_ops, omp_clause_code_name): Add allocate
	clause.
	(walk_tree_1): Handle OMP_CLAUSE_ALLOCATE.
	* tree-pretty-print.c (dump_omp_clause): Likewise.
	* gimplify.c (gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses,
	gimplify_omp_for): Likewise.
	* tree-nested.c (convert_nonlocal_omp_clauses,
	convert_local_omp_clauses): Likewise.
	* omp-low.c (scan_sharing_clauses): Likewise.
gcc/c-family/
	* c-pragma.h (enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALLOCATE.
	* c-omp.c: Include bitmap.h.
	(c_omp_split_clauses): Handle OMP_CLAUSE_ALLOCATE.
gcc/c/
	* c-parser.c (c_parser_omp_clause_name): Handle allocate.
	(c_parser_omp_clause_allocate): New function.
	(c_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_ALLOCATE.
	(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
	OMP_PARALLEL_CLAUSE_MASK, OMP_SINGLE_CLAUSE_MASK,
	OMP_TASK_CLAUSE_MASK, OMP_TASKGROUP_CLAUSE_MASK,
	OMP_DISTRIBUTE_CLAUSE_MASK, OMP_TEAMS_CLAUSE_MASK,
	OMP_TARGET_CLAUSE_MASK, OMP_TASKLOOP_CLAUSE_MASK): Add
	PRAGMA_OMP_CLAUSE_ALLOCATE.
	* c-typeck.c (c_finish_omp_clauses): Handle OMP_CLAUSE_ALLOCATE.
gcc/cp/
	* parser.c (cp_parser_omp_clause_name): Handle allocate.
	(cp_parser_omp_clause_allocate): New function.
	(cp_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_ALLOCATE.
	(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
	OMP_PARALLEL_CLAUSE_MASK, OMP_SINGLE_CLAUSE_MASK,
	OMP_TASK_CLAUSE_MASK, OMP_TASKGROUP_CLAUSE_MASK,
	OMP_DISTRIBUTE_CLAUSE_MASK, OMP_TEAMS_CLAUSE_MASK,
	OMP_TARGET_CLAUSE_MASK, OMP_TASKLOOP_CLAUSE_MASK): Add
	PRAGMA_OMP_CLAUSE_ALLOCATE.
	* semantics.c (finish_omp_clauses): Handle OMP_CLAUSE_ALLOCATE.
	* pt.c (tsubst_omp_clauses): Likewise.
gcc/testsuite/
	* c-c++-common/gomp/allocate-1.c: New test.
	* c-c++-common/gomp/allocate-2.c: New test.
	* c-c++-common/gomp/clauses-1.c (omp_allocator_handle_t): New typedef.
	(foo, bar, baz): Add allocate clauses where allowed.

gcc/c-family/c-omp.c

@ -35,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "attribs.h"
#include "gimplify.h"
#include "langhooks.h"
#include "bitmap.h"
/* Complete a #pragma oacc wait construct. LOC is the location of
@ -1575,6 +1576,7 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
tree next, c;
enum c_omp_clause_split s;
int i;
bool has_dup_allocate = false;
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
cclauses[i] = NULL;
@ -2198,6 +2200,71 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
/* Allocate clause is allowed on target, teams, distribute, parallel,
for, sections and taskloop. Distribute it to all. */
case OMP_CLAUSE_ALLOCATE:
s = C_OMP_CLAUSE_SPLIT_COUNT;
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
{
switch (i)
{
case C_OMP_CLAUSE_SPLIT_TARGET:
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_MAP)) == 0)
continue;
break;
case C_OMP_CLAUSE_SPLIT_TEAMS:
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
continue;
break;
case C_OMP_CLAUSE_SPLIT_DISTRIBUTE:
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
continue;
break;
case C_OMP_CLAUSE_SPLIT_PARALLEL:
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
continue;
break;
case C_OMP_CLAUSE_SPLIT_FOR:
STATIC_ASSERT (C_OMP_CLAUSE_SPLIT_SECTIONS
== C_OMP_CLAUSE_SPLIT_FOR
&& (C_OMP_CLAUSE_SPLIT_TASKLOOP
== C_OMP_CLAUSE_SPLIT_FOR)
&& (C_OMP_CLAUSE_SPLIT_LOOP
== C_OMP_CLAUSE_SPLIT_FOR));
if (code == OMP_SECTIONS)
break;
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
break;
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
break;
continue;
case C_OMP_CLAUSE_SPLIT_SIMD:
continue;
default:
gcc_unreachable ();
}
if (s != C_OMP_CLAUSE_SPLIT_COUNT)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_ALLOCATE);
OMP_CLAUSE_DECL (c)
= OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_ALLOCATE_ALLOCATOR (c)
= OMP_CLAUSE_ALLOCATE_ALLOCATOR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[s];
cclauses[s] = c;
has_dup_allocate = true;
}
s = (enum c_omp_clause_split) i;
}
gcc_assert (s != C_OMP_CLAUSE_SPLIT_COUNT);
break;
default:
gcc_unreachable ();
}
@ -2205,6 +2272,108 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
cclauses[s] = clauses;
}
if (has_dup_allocate)
{
bool need_prune = false;
bitmap_obstack_initialize (NULL);
for (i = 0; i < C_OMP_CLAUSE_SPLIT_SIMD - (code == OMP_LOOP); i++)
if (cclauses[i])
{
bitmap_head allocate_head;
bitmap_initialize (&allocate_head, &bitmap_default_obstack);
for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
&& DECL_P (OMP_CLAUSE_DECL (c)))
bitmap_set_bit (&allocate_head,
DECL_UID (OMP_CLAUSE_DECL (c)));
for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
if (DECL_P (OMP_CLAUSE_DECL (c)))
bitmap_clear_bit (&allocate_head,
DECL_UID (OMP_CLAUSE_DECL (c)));
break;
default:
break;
}
for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
&& DECL_P (OMP_CLAUSE_DECL (c))
&& bitmap_bit_p (&allocate_head,
DECL_UID (OMP_CLAUSE_DECL (c))))
{
/* Mark allocate clauses which don't have corresponding
explicit data sharing clause. */
OMP_CLAUSE_ALLOCATE_COMBINED (c) = 1;
need_prune = true;
}
}
bitmap_obstack_release (NULL);
if (need_prune)
{
/* At least one allocate clause has been marked. Walk all the
duplicated allocate clauses in sync. If it is marked in all
constituent constructs, diagnose it as invalid and remove
them. Otherwise, remove all marked inner clauses inside
a construct that doesn't have them marked. Keep the outer
marked ones, because some clause duplication is done only
during gimplification. */
tree *p[C_OMP_CLAUSE_SPLIT_COUNT];
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
if (cclauses[i] == NULL_TREE
|| i == C_OMP_CLAUSE_SPLIT_SIMD
|| (i == C_OMP_CLAUSE_SPLIT_LOOP && code == OMP_LOOP))
p[i] = NULL;
else
p[i] = &cclauses[i];
do
{
int j = -1;
tree seen = NULL_TREE;
for (i = C_OMP_CLAUSE_SPLIT_COUNT - 1; i >= 0; i--)
if (p[i])
{
while (*p[i]
&& OMP_CLAUSE_CODE (*p[i]) != OMP_CLAUSE_ALLOCATE)
p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
if (*p[i] == NULL_TREE)
{
i = C_OMP_CLAUSE_SPLIT_COUNT;
break;
}
if (!OMP_CLAUSE_ALLOCATE_COMBINED (*p[i]) && j == -1)
j = i;
seen = *p[i];
}
if (i == C_OMP_CLAUSE_SPLIT_COUNT)
break;
if (j == -1)
error_at (OMP_CLAUSE_LOCATION (seen),
"%qD specified in %<allocate%> clause but not in "
"an explicit privatization clause",
OMP_CLAUSE_DECL (seen));
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
if (p[i])
{
if (i > j)
/* Remove. */
*p[i] = OMP_CLAUSE_CHAIN (*p[i]);
else
/* Keep. */
p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
}
}
while (1);
}
}
if (!flag_checking)
return;
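
To illustrate the splitting and pruning above, a sketch adapted from the new
c-c++-common/gomp/allocate-2.c test added by this patch (the wrapper function
name is just illustrative): on a combined construct the allocate clause is
duplicated to each constituent leaf construct that accepts it, and it is
diagnosed if the listed variable is not explicitly privatized on any of those
leaves.

void
use_allocate (int x)
{
  int i;
  /* private (x) goes to the innermost simd leaf, which does not accept
     allocate; the allocate (x) copies distributed to parallel and for thus
     have no matching privatization clause, and c_omp_split_clauses reports
     "'x' specified in 'allocate' clause but not in an explicit privatization
     clause".  */
#pragma omp parallel for simd private (x) allocate (x)
  for (i = 0; i < 16; i++)
    x = i;
}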

gcc/c-family/c-pragma.h

@ -86,6 +86,7 @@ enum pragma_omp_clause {
PRAGMA_OMP_CLAUSE_NONE = 0,
PRAGMA_OMP_CLAUSE_ALIGNED,
PRAGMA_OMP_CLAUSE_ALLOCATE,
PRAGMA_OMP_CLAUSE_BIND,
PRAGMA_OMP_CLAUSE_COLLAPSE,
PRAGMA_OMP_CLAUSE_COPYIN,

gcc/c/c-parser.c

@ -12603,6 +12603,8 @@ c_parser_omp_clause_name (c_parser *parser)
case 'a':
if (!strcmp ("aligned", p))
result = PRAGMA_OMP_CLAUSE_ALIGNED;
else if (!strcmp ("allocate", p))
result = PRAGMA_OMP_CLAUSE_ALLOCATE;
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
else if (!strcmp ("attach", p))
@ -15112,6 +15114,62 @@ c_parser_omp_clause_aligned (c_parser *parser, tree list)
return nl;
}
/* OpenMP 5.0:
allocate ( variable-list )
allocate ( expression : variable-list ) */
static tree
c_parser_omp_clause_allocate (c_parser *parser, tree list)
{
location_t clause_loc = c_parser_peek_token (parser)->location;
tree nl, c;
tree allocator = NULL_TREE;
matching_parens parens;
if (!parens.require_open (parser))
return list;
if ((c_parser_next_token_is_not (parser, CPP_NAME)
&& c_parser_next_token_is_not (parser, CPP_KEYWORD))
|| (c_parser_peek_2nd_token (parser)->type != CPP_COMMA
&& c_parser_peek_2nd_token (parser)->type != CPP_CLOSE_PAREN))
{
location_t expr_loc = c_parser_peek_token (parser)->location;
c_expr expr = c_parser_expr_no_commas (parser, NULL);
expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
allocator = expr.value;
allocator = c_fully_fold (allocator, false, NULL);
tree orig_type
= expr.original_type ? expr.original_type : TREE_TYPE (allocator);
orig_type = TYPE_MAIN_VARIANT (orig_type);
if (!INTEGRAL_TYPE_P (TREE_TYPE (allocator))
|| TREE_CODE (orig_type) != ENUMERAL_TYPE
|| TYPE_NAME (orig_type) != get_identifier ("omp_allocator_handle_t"))
{
error_at (clause_loc, "%<allocate%> clause allocator expression "
"has type %qT rather than "
"%<omp_allocator_handle_t%>",
TREE_TYPE (allocator));
allocator = NULL_TREE;
}
if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
{
parens.skip_until_found_close (parser);
return list;
}
}
nl = c_parser_omp_variable_list (parser, clause_loc,
OMP_CLAUSE_ALLOCATE, list);
if (allocator)
for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = allocator;
parens.skip_until_found_close (parser);
return nl;
}
/* OpenMP 4.0:
linear ( variable-list )
linear ( variable-list : expression )
@ -16354,6 +16412,10 @@ c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
clauses = c_parser_omp_clause_aligned (parser, clauses);
c_name = "aligned";
break;
case PRAGMA_OMP_CLAUSE_ALLOCATE:
clauses = c_parser_omp_clause_allocate (parser, clauses);
c_name = "allocate";
break;
case PRAGMA_OMP_CLAUSE_LINEAR:
clauses = c_parser_omp_clause_linear (parser, clauses);
c_name = "linear";
@ -18534,6 +18596,7 @@ c_parser_omp_simd (location_t loc, c_parser *parser,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
static tree
@ -18825,6 +18888,7 @@ c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
@ -18879,6 +18943,7 @@ c_parser_omp_sections (location_t loc, c_parser *parser,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
static tree
@ -19020,6 +19085,7 @@ c_parser_omp_parallel (location_t loc, c_parser *parser,
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
@ -19054,6 +19120,7 @@ c_parser_omp_single (location_t loc, c_parser *parser, bool *if_p)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
static tree
@ -19124,7 +19191,8 @@ c_parser_omp_taskyield (c_parser *parser)
*/
#define OMP_TASKGROUP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
static tree
c_parser_omp_taskgroup (location_t loc, c_parser *parser, bool *if_p)
@ -19225,6 +19293,7 @@ c_parser_omp_cancellation_point (c_parser *parser, enum pragma_context context)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
static tree
@ -19314,6 +19383,7 @@ c_parser_omp_distribute (location_t loc, c_parser *parser,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))
static tree
@ -19701,6 +19771,7 @@ c_parser_omp_target_exit_data (location_t loc, c_parser *parser,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))
@ -21331,6 +21402,7 @@ c_finish_taskloop_clauses (tree clauses)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))

gcc/c/c-typeck.c

@ -13795,6 +13795,7 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
bool allocate_seen = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
@ -14344,6 +14345,29 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
case OMP_CLAUSE_ALLOCATE:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<allocate%> clause", t);
remove = true;
}
else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%qE appears more than once in %<allocate%> clauses",
t);
remove = true;
}
else
{
bitmap_set_bit (&aligned_head, DECL_UID (t));
if (!OMP_CLAUSE_ALLOCATE_COMBINED (c))
allocate_seen = true;
}
break;
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
@ -15041,10 +15065,27 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
reduction_seen = -2;
}
if (linear_variable_step_check || reduction_seen == -2)
if (linear_variable_step_check || reduction_seen == -2 || allocate_seen)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
if (allocate_seen)
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
if (DECL_P (OMP_CLAUSE_DECL (c)))
bitmap_clear_bit (&aligned_head,
DECL_UID (OMP_CLAUSE_DECL (c)));
break;
default:
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)
&& !bitmap_bit_p (&map_head,
@ -15065,6 +15106,25 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
pc = &OMP_CLAUSE_CHAIN (c);
}
if (allocate_seen)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
&& !OMP_CLAUSE_ALLOCATE_COMBINED (c)
&& bitmap_bit_p (&aligned_head, DECL_UID (OMP_CLAUSE_DECL (c))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD specified in %<allocate%> clause but not in "
"an explicit privatization clause", OMP_CLAUSE_DECL (c));
remove = true;
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
if (nogroup_seen && reduction_seen)
{
error_at (OMP_CLAUSE_LOCATION (*nogroup_seen),

gcc/cp/parser.c

@ -34127,6 +34127,8 @@ cp_parser_omp_clause_name (cp_parser *parser)
case 'a':
if (!strcmp ("aligned", p))
result = PRAGMA_OMP_CLAUSE_ALIGNED;
else if (!strcmp ("allocate", p))
result = PRAGMA_OMP_CLAUSE_ALLOCATE;
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
else if (!strcmp ("attach", p))
@ -36282,6 +36284,47 @@ cp_parser_omp_clause_aligned (cp_parser *parser, tree list)
return nlist;
}
/* OpenMP 5.0:
allocate ( variable-list )
allocate ( expression : variable-list ) */
static tree
cp_parser_omp_clause_allocate (cp_parser *parser, tree list)
{
tree nlist, c, allocator = NULL_TREE;
bool colon;
matching_parens parens;
if (!parens.require_open (parser))
return list;
cp_parser_parse_tentatively (parser);
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
parser->colon_corrects_to_scope_p = false;
allocator = cp_parser_assignment_expression (parser);
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
{
cp_parser_parse_definitely (parser);
cp_lexer_consume_token (parser->lexer);
if (allocator == error_mark_node)
allocator = NULL_TREE;
}
else
{
cp_parser_abort_tentative_parse (parser);
allocator = NULL_TREE;
}
nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_ALLOCATE, list,
&colon);
for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = allocator;
return nlist;
}
/* OpenMP 2.5:
lastprivate ( variable-list )
@ -37599,6 +37642,10 @@ cp_parser_omp_all_clauses (cp_parser *parser, omp_clause_mask mask,
clauses = cp_parser_omp_clause_aligned (parser, clauses);
c_name = "aligned";
break;
case PRAGMA_OMP_CLAUSE_ALLOCATE:
clauses = cp_parser_omp_clause_allocate (parser, clauses);
c_name = "allocate";
break;
case PRAGMA_OMP_CLAUSE_LINEAR:
{
bool declare_simd = false;
@ -39618,6 +39665,7 @@ cp_parser_omp_simd (cp_parser *parser, cp_token *pragma_tok,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
static tree
@ -39902,6 +39950,7 @@ cp_parser_omp_sections_scope (cp_parser *parser)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
@ -39952,6 +40001,7 @@ cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
static tree
@ -40097,6 +40147,7 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok,
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
@ -40129,6 +40180,7 @@ cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
static tree
@ -40193,7 +40245,8 @@ cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok)
# pragma omp taskgroup taskgroup-clause[optseq] new-line */
#define OMP_TASKGROUP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
static tree
cp_parser_omp_taskgroup (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
@ -40302,6 +40355,7 @@ cp_parser_omp_cancellation_point (cp_parser *parser, cp_token *pragma_tok,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
static tree
@ -40400,6 +40454,7 @@ cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))
static tree
@ -40806,6 +40861,7 @@ cp_parser_omp_target_update (cp_parser *parser, cp_token *pragma_tok,
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))
static bool
@ -42955,6 +43011,7 @@ cp_parser_omp_requires (cp_parser *parser, cp_token *pragma_tok)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))

gcc/cp/pt.c

@ -17306,6 +17306,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort,
break;
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_ALLOCATE:
OMP_CLAUSE_DECL (nc)
= tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
in_decl, NULL);

gcc/cp/semantics.c

@ -6373,6 +6373,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
bool allocate_seen = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
@ -7198,6 +7199,73 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
case OMP_CLAUSE_ALLOCATE:
t = OMP_CLAUSE_DECL (c);
if (t == current_class_ptr)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<this%> not allowed in %<allocate%> clause");
remove = true;
break;
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
break;
if (DECL_P (t))
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is not a variable in %<allocate%> clause", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<allocate%> clause", t);
remove = true;
}
else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"%qD appears more than once in %<allocate%> clauses",
t);
remove = true;
}
else
{
bitmap_set_bit (&aligned_head, DECL_UID (t));
allocate_seen = true;
}
t = OMP_CLAUSE_ALLOCATE_ALLOCATOR (c);
if (error_operand_p (t))
{
remove = true;
break;
}
if (t == NULL_TREE)
break;
tree allocatort;
allocatort = TYPE_MAIN_VARIANT (TREE_TYPE (t));
if (!type_dependent_expression_p (t)
&& (TREE_CODE (allocatort) != ENUMERAL_TYPE
|| TYPE_NAME (allocatort) == NULL_TREE
|| TREE_CODE (TYPE_NAME (allocatort)) != TYPE_DECL
|| (DECL_NAME (TYPE_NAME (allocatort))
!= get_identifier ("omp_allocator_handle_t"))
|| (TYPE_CONTEXT (allocatort)
!= DECL_CONTEXT (global_namespace))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<allocate%> clause allocator expression has "
"type %qT rather than %<omp_allocator_handle_t%>",
TREE_TYPE (t));
remove = true;
}
else
{
t = mark_rvalue_use (t);
if (!processing_template_decl)
t = maybe_constant_value (t);
OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = t;
}
break;
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
@ -8169,6 +8237,12 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
const char *share_name = NULL;
if (allocate_seen
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED
&& DECL_P (t))
bitmap_clear_bit (&aligned_head,
DECL_UID (OMP_CLAUSE_DECL (c)));
if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (cxx_omp_predetermined_sharing_1 (t))
@ -8262,6 +8336,25 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
pc = &OMP_CLAUSE_CHAIN (c);
}
if (allocate_seen)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
&& !OMP_CLAUSE_ALLOCATE_COMBINED (c)
&& bitmap_bit_p (&aligned_head, DECL_UID (OMP_CLAUSE_DECL (c))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD specified in %<allocate%> clause but not in "
"an explicit privatization clause", OMP_CLAUSE_DECL (c));
remove = true;
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
bitmap_obstack_release (NULL);
return clauses;
}

gcc/gimplify.c

@ -9708,6 +9708,21 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
omp_add_variable (ctx, decl, GOVD_NONTEMPORAL);
break;
case OMP_CLAUSE_ALLOCATE:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (gimplify_expr (&OMP_CLAUSE_ALLOCATE_ALLOCATOR (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
@ -10618,6 +10633,7 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
case OMP_CLAUSE_ALLOCATE:
break;
default:
@ -12149,7 +12165,7 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
/* Taskloop clause we duplicate on both taskloops. */
/* Collapse clause we duplicate on both taskloops. */
case OMP_CLAUSE_COLLAPSE:
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
@ -12191,6 +12207,13 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
gtask_clauses_ptr
= &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
break;
/* Allocate clause we duplicate on task and inner taskloop. */
case OMP_CLAUSE_ALLOCATE:
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
*gtask_clauses_ptr = copy_node (c);
gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
break;
default:
gcc_unreachable ();
}

gcc/omp-low.c

@ -1486,6 +1486,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_TASK_REDUCTION:
case OMP_CLAUSE_ALLOCATE:
break;
case OMP_CLAUSE_ALIGNED:
@ -1653,6 +1654,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_ALLOCATE:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_TO:

gcc/testsuite/c-c++-common/gomp/allocate-1.c (new file)

@ -0,0 +1,76 @@
typedef enum omp_allocator_handle_t
#if __cplusplus >= 201103L
: __UINTPTR_TYPE__
#endif
{
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
__omp_allocator_handle_t_max__ = __UINTPTR_MAX__
} omp_allocator_handle_t;
int bar (int, int *, int);
omp_allocator_handle_t baz (void);
void
foo (int x, int z)
{
int y[16] = { 0 }, r = 0, i;
omp_allocator_handle_t h = baz ();
#pragma omp parallel allocate (x) allocate (omp_default_mem_alloc : y) \
allocate ((omp_allocator_handle_t) omp_default_mem_alloc:z) firstprivate (x, y, z)
bar (x, y, z);
#pragma omp task private (x) firstprivate (z) allocate (omp_low_lat_mem_alloc:x,z)
bar (0, &x, z);
#pragma omp taskwait
#pragma omp target teams distribute parallel for private (x) firstprivate (y) \
allocate ((omp_allocator_handle_t)(omp_default_mem_alloc + 0):z) \
allocate (omp_default_mem_alloc: x, y) allocate (omp_low_lat_mem_alloc: r) \
lastprivate (z) reduction(+:r)
for (i = 0; i < 64; i++)
{
z = bar (0, &x, 0);
r += bar (1, y, 0);
}
#pragma omp single private (x) allocate (h:x)
;
#pragma omp single allocate (*&h : x) private (x)
;
#pragma omp parallel shared (r, x, z)
#pragma omp single firstprivate (r) allocate (x, r, z) private (x, z)
;
#pragma omp for allocate (x) private (x)
for (i = 0; i < 64; i++)
x = 1;
#pragma omp sections private (x) allocate (omp_low_lat_mem_alloc: x)
{
x = 1;
#pragma omp section
x = 2;
#pragma omp section
x = 3;
}
#pragma omp taskgroup task_reduction(+:r) allocate (omp_default_mem_alloc : r)
#pragma omp task in_reduction(+:r) allocate (omp_default_mem_alloc : r)
r += bar (r, &r, 0);
#pragma omp teams private (x) firstprivate (y) allocate (h : x, y)
bar (x, y, 0);
#pragma omp taskloop lastprivate (x) reduction (+:r) allocate (h : x, r)
for (i = 0; i < 16; i++)
{
r += bar (0, &r, 0);
x = i;
}
#pragma omp taskgroup task_reduction(+:r) allocate (omp_default_mem_alloc : r)
#pragma omp taskloop firstprivate (x) in_reduction (+:r) \
allocate (omp_default_mem_alloc : x, r)
for (i = 0; i < 16; i++)
r += bar (x, &r, 0);
#pragma omp taskwait
}

gcc/testsuite/c-c++-common/gomp/allocate-2.c (new file)

@ -0,0 +1,45 @@
typedef enum omp_allocator_handle_t
#if __cplusplus >= 201103L
: __UINTPTR_TYPE__
#endif
{
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
__omp_allocator_handle_t_max__ = __UINTPTR_MAX__
} omp_allocator_handle_t;
int bar (int, int *, int);
omp_allocator_handle_t baz (void);
void
foo (int x, int z)
{
int i;
#pragma omp task allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
bar (x, &x, 0);
#pragma omp taskwait
#pragma omp parallel allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
bar (x, &x, 0);
#pragma omp parallel for simd private (x) allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
for (i = 0; i < 16; i++)
x = i;
#pragma omp parallel allocate (foo) /* { dg-error "'\[^\n\r]*foo\[^\n\r]*' is not a variable in 'allocate' clause" } */
;
#pragma omp parallel allocate (x) shared (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
bar (x, &x, 0);
#pragma omp parallel private (x) allocate (x) allocate (x) /* { dg-warning "'x' appears more than once in 'allocate' clauses" } */
bar (x, &x, 0);
#pragma omp parallel private (x) allocate (x, x) /* { dg-warning "'x' appears more than once in 'allocate' clauses" } */
bar (x, &x, 0);
#pragma omp parallel private (x) allocate (0.0 : x) /* { dg-error "'allocate' clause allocator expression has type 'double' rather than 'omp_allocator_handle_t'" } */
bar (x, &x, 0);
#pragma omp parallel private (x) allocate (0 : x) /* { dg-error "'allocate' clause allocator expression has type 'int' rather than 'omp_allocator_handle_t'" } */
bar (x, &x, 0);
}

gcc/testsuite/c-c++-common/gomp/clauses-1.c

@ -1,6 +1,23 @@
/* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
typedef enum omp_allocator_handle_t
#if __cplusplus >= 201103L
: __UINTPTR_TYPE__
#endif
{
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
__omp_allocator_handle_t_max__ = __UINTPTR_MAX__
} omp_allocator_handle_t;
int t;
#pragma omp threadprivate (t)
@ -14,20 +31,20 @@ foo (int d, int m, int i1, int i2, int p, int *idp, int s,
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent)
lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
safelen(8) simdlen(4) aligned(q: 32) order(concurrent)
safelen(8) simdlen(4) aligned(q: 32) order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
}
@ -49,20 +66,20 @@ baz (int d, int m, int i1, int i2, int p, int *idp, int s,
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) copyin(t) order(concurrent)
lastprivate (l) schedule(static, 4) copyin(t) order(concurrent) allocate (p)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent)
safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp loop bind(parallel) order(concurrent) \
@ -77,28 +94,28 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
{
#pragma omp for simd \
private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \
safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent)
safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent)
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for simd \
private (p) firstprivate (f) if (i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent)
safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel sections \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l)
lastprivate (l) allocate (f)
{
#pragma omp section
{}
@ -108,35 +125,39 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
#pragma omp target parallel \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
nowait depend(inout: dd[0])
nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f)
;
#pragma omp target parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent)
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target parallel for simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent)
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
allocate (omp_default_mem_alloc:f)
;
#pragma omp target teams distribute \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0])
collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
;
#pragma omp target teams distribute parallel for \
@ -144,7 +165,8 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent)
lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute parallel for simd \
@ -153,47 +175,50 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3)
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute simd \
device(d) map (tofrom: m) if (i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm)
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) \
nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent)
nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent) \
allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction(+:r2)
#pragma omp taskgroup task_reduction(+:r2) allocate (r2)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction(+:r)
#pragma omp taskgroup task_reduction(+:r) allocate (r)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(i1) final(fi) mergeable nogroup priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) in_reduction(+:r) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskwait
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) if (simd: i3) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target nowait depend(inout: dd[0])
#pragma omp teams distribute \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16)
collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
;
#pragma omp target
@ -201,7 +226,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent)
lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
@ -210,21 +235,23 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm)
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) \
allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm) \
allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute parallel for \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) copyin(t)
lastprivate (l) schedule(static, 4) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute parallel for simd \
@ -232,65 +259,66 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t)
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t) \
allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm) allocate(f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r) \
num_threads (nth) proc_bind(spread) copyin(t)
num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
;
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) in_reduction(+:r2)
reduction(default, +:r) in_reduction(+:r2) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) in_reduction(+:r2)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t)
reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t) \
order(concurrent)
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp loop bind(thread) order(concurrent) \
@ -299,46 +327,50 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
ll++;
#pragma omp parallel loop \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) collapse(1) bind(parallel) order(concurrent)
lastprivate (l) collapse(1) bind(parallel) order(concurrent) allocate (f)
for (l = 0; l < 64; l++)
ll++;
#pragma omp parallel loop \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) collapse(1)
lastprivate (l) collapse(1) allocate (f)
for (l = 0; l < 64; l++)
ll++;
#pragma omp teams loop \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) lastprivate (l) bind(teams)
collapse(1) lastprivate (l) bind(teams) allocate (f)
for (l = 0; l < 64; ++l)
;
#pragma omp teams loop \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) lastprivate (l) order(concurrent)
collapse(1) lastprivate (l) order(concurrent) allocate (f)
for (l = 0; l < 64; ++l)
;
#pragma omp target parallel loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1)
nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1) \
allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target parallel loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1)
nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1) \
allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target teams loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
lastprivate (l) bind(teams) collapse(1)
lastprivate (l) bind(teams) collapse(1) \
allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target teams loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
lastprivate (l) order(concurrent) collapse(1)
lastprivate (l) order(concurrent) collapse(1) \
allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
}

gcc/tree-core.h

@ -276,6 +276,9 @@ enum omp_clause_code {
/* OpenMP clause: aligned (variable-list[:alignment]). */
OMP_CLAUSE_ALIGNED,
/* OpenMP clause: allocate ([allocator:]variable-list). */
OMP_CLAUSE_ALLOCATE,
/* OpenMP clause: depend ({in,out,inout}:variable-list). */
OMP_CLAUSE_DEPEND,

gcc/tree-nested.c

@ -1441,6 +1441,7 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
}
/* FALLTHRU */
case OMP_CLAUSE_NONTEMPORAL:
do_decl_clause_no_supp:
/* Like do_decl_clause, but don't add any suppression. */
decl = OMP_CLAUSE_DECL (clause);
if (VAR_P (decl)
@ -1453,6 +1454,16 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
}
break;
case OMP_CLAUSE_ALLOCATE:
if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
{
wi->val_only = true;
wi->is_lhs = false;
convert_nonlocal_reference_op
(&OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause), &dummy, wi);
}
goto do_decl_clause_no_supp;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
@ -2203,6 +2214,7 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
}
/* FALLTHRU */
case OMP_CLAUSE_NONTEMPORAL:
do_decl_clause_no_supp:
/* Like do_decl_clause, but don't add any suppression. */
decl = OMP_CLAUSE_DECL (clause);
if (VAR_P (decl)
@ -2221,6 +2233,16 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
}
break;
case OMP_CLAUSE_ALLOCATE:
if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
{
wi->val_only = true;
wi->is_lhs = false;
convert_local_reference_op
(&OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause), &dummy, wi);
}
goto do_decl_clause_no_supp;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:

gcc/tree-pretty-print.c

@ -712,6 +712,19 @@ dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
pp_right_paren (pp);
break;
case OMP_CLAUSE_ALLOCATE:
pp_string (pp, "allocate(");
if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
{
dump_generic_node (pp, OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause),
spc, flags, false);
pp_colon (pp);
}
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_DEPEND:
pp_string (pp, "depend(");
switch (OMP_CLAUSE_DEPEND_KIND (clause))

gcc/tree.c

@ -291,6 +291,7 @@ unsigned const char omp_clause_num_ops[] =
1, /* OMP_CLAUSE_COPYPRIVATE */
3, /* OMP_CLAUSE_LINEAR */
2, /* OMP_CLAUSE_ALIGNED */
2, /* OMP_CLAUSE_ALLOCATE */
1, /* OMP_CLAUSE_DEPEND */
1, /* OMP_CLAUSE_NONTEMPORAL */
1, /* OMP_CLAUSE_UNIFORM */
@ -375,6 +376,7 @@ const char * const omp_clause_code_name[] =
"copyprivate",
"linear",
"aligned",
"allocate",
"depend",
"nontemporal",
"uniform",
@ -12213,6 +12215,7 @@ walk_tree_1 (tree *tp, walk_tree_fn func, void *data,
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_ALLOCATE:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_MAP:

gcc/tree.h

@ -1731,6 +1731,16 @@ class auto_suppress_location_wrappers
#define OMP_CLAUSE_ALIGNED_ALIGNMENT(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALIGNED), 1)
#define OMP_CLAUSE_ALLOCATE_ALLOCATOR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE), 1)
/* True if an ALLOCATE clause was present on a combined or composite
construct and the code for splitting the clauses has already performed
checking if the listed variable has explicit privatization on the
construct. */
#define OMP_CLAUSE_ALLOCATE_COMBINED(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE)->base.public_flag)
#define OMP_CLAUSE_NUM_TEAMS_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TEAMS), 0)