diff --git a/gcc/c-family/c-omp.c b/gcc/c-family/c-omp.c
index 476abc124b249388f13da95c42d5bb139c9c8f27..b606cf4b5381ac2fd45a11d90468c13cb6919449 100644
--- a/gcc/c-family/c-omp.c
+++ b/gcc/c-family/c-omp.c
@@ -2114,14 +2114,31 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
 	    }
 	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
 	  break;
-	/* order clauses are allowed on for, simd and loop.  */
+	/* order clauses are allowed on distribute, for, simd and loop.  */
 	case OMP_CLAUSE_ORDER:
+	  if ((mask & (OMP_CLAUSE_MASK_1
+		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
+	    {
+	      if (code == OMP_DISTRIBUTE)
+		{
+		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
+		  break;
+		}
+	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
+				    OMP_CLAUSE_ORDER);
+	      OMP_CLAUSE_ORDER_UNCONSTRAINED (c)
+		= OMP_CLAUSE_ORDER_UNCONSTRAINED (clauses);
+	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
+	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
+	    }
 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
 	    {
 	      if (code == OMP_SIMD)
 		{
 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
 					OMP_CLAUSE_ORDER);
+		  OMP_CLAUSE_ORDER_UNCONSTRAINED (c)
+		    = OMP_CLAUSE_ORDER_UNCONSTRAINED (clauses);
 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
 		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
 		  s = C_OMP_CLAUSE_SPLIT_SIMD;
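
The c_omp_split_clauses hunk above is what makes order usable on combined constructs that contain distribute: when the clause mask includes dist_schedule, a copy of the order clause (including the new unconstrained bit) is now chained onto the distribute leaf in addition to the for/simd leaves. A minimal sketch, in the style of the testcases later in this patch, of a combined directive this splitting applies to:

    /* Sketch only: after c_omp_split_clauses, the distribute, for and simd
       leaf constructs of this combined directive each carry their own
       OMP_CLAUSE_ORDER copy, with OMP_CLAUSE_ORDER_UNCONSTRAINED set.  */
    void
    split_example (int *a)
    {
      int i;
      #pragma omp teams distribute parallel for simd order(unconstrained:concurrent)
      for (i = 0; i < 64; i++)
        a[i]++;
    }
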
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index c5e222bfeef5e7ad0cd6309f04b5958210cca1d5..fb1399e300d4df4fbbed6493477fbd1b066f79ce 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -14610,7 +14610,14 @@ c_parser_oacc_clause_wait (c_parser *parser, tree list)
 
 
 /* OpenMP 5.0:
-   order ( concurrent ) */
+   order ( concurrent )
+
+   OpenMP 5.1:
+   order ( order-modifier : concurrent )
+
+   order-modifier:
+     reproducible
+     unconstrained  */
 
 static tree
 c_parser_omp_clause_order (c_parser *parser, tree list)
@@ -14618,10 +14625,26 @@ c_parser_omp_clause_order (c_parser *parser, tree list)
   location_t loc = c_parser_peek_token (parser)->location;
   tree c;
   const char *p;
+  bool unconstrained = false;
 
   matching_parens parens;
   if (!parens.require_open (parser))
     return list;
+  if (c_parser_next_token_is (parser, CPP_NAME)
+      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
+    {
+      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
+      if (strcmp (p, "unconstrained") == 0)
+	unconstrained = true;
+      else if (strcmp (p, "reproducible") != 0)
+	{
+	  c_parser_error (parser, "expected %<reproducible%> or "
+				  "%<unconstrained%>");
+	  goto out_err;
+	}
+      c_parser_consume_token (parser);
+      c_parser_consume_token (parser);
+    }
   if (!c_parser_next_token_is (parser, CPP_NAME))
     {
       c_parser_error (parser, "expected %<concurrent%>");
@@ -14635,8 +14658,9 @@ c_parser_omp_clause_order (c_parser *parser, tree list)
     }
   c_parser_consume_token (parser);
   parens.skip_until_found_close (parser);
-  /* check_no_duplicate_clause (list, OMP_CLAUSE_ORDER, "order"); */
+  check_no_duplicate_clause (list, OMP_CLAUSE_ORDER, "order");
   c = build_omp_clause (loc, OMP_CLAUSE_ORDER);
+  OMP_CLAUSE_ORDER_UNCONSTRAINED (c) = unconstrained;
   OMP_CLAUSE_CHAIN (c) = list;
   return c;
 
@@ -20250,7 +20274,8 @@ c_parser_omp_cancellation_point (c_parser *parser, enum pragma_context context)
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE)	\
-	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
+	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
+	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
 
 static tree
 c_parser_omp_distribute (location_t loc, c_parser *parser,
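
Taken together, the c-parser.c changes accept the OpenMP 5.1 modifier syntax, record only the unconstrained modifier on the clause (reproducible is parsed but not stored), diagnose duplicate order clauses, and add PRAGMA_OMP_CLAUSE_ORDER to the distribute clause mask. A short sketch of the spellings the updated parser accepts, following the testsuite style:

    /* Sketch: clause spellings handled by the updated
       c_parser_omp_clause_order.  Whitespace around the modifier and the
       colon does not matter; only "unconstrained" sets
       OMP_CLAUSE_ORDER_UNCONSTRAINED, and a second order clause on the
       same directive is now rejected.  */
    void
    order_spellings (int *a)
    {
      int i;
      #pragma omp for order(concurrent)                 /* OpenMP 5.0 form.  */
      for (i = 0; i < 128; i++)
        a[i]++;
      #pragma omp for order(reproducible : concurrent)  /* OpenMP 5.1 modifier.  */
      for (i = 0; i < 128; i++)
        a[i]++;
      #pragma omp teams
      {
        #pragma omp distribute order(unconstrained:concurrent)  /* Now allowed on distribute.  */
        for (i = 0; i < 128; i++)
          a[i]++;
      }
    }
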
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index b9ed38dbafb9d9bcccad48e9870d47aff929d27e..62908daa5b70b5103ebc77ffc4af0aa20e95226c 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -37703,18 +37703,42 @@ cp_parser_omp_clause_defaultmap (cp_parser *parser, tree list,
 }
 
 /* OpenMP 5.0:
-   order ( concurrent ) */
+   order ( concurrent )
+
+   OpenMP 5.1:
+   order ( order-modifier : concurrent )
+
+   order-modifier:
+     reproducible
+     unconstrained  */
 
 static tree
 cp_parser_omp_clause_order (cp_parser *parser, tree list, location_t location)
 {
   tree c, id;
   const char *p;
+  bool unconstrained = false;
 
   matching_parens parens;
   if (!parens.require_open (parser))
     return list;
 
+  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
+      && cp_lexer_nth_token_is (parser->lexer, 2, CPP_COLON))
+    {
+      id = cp_lexer_peek_token (parser->lexer)->u.value;
+      p = IDENTIFIER_POINTER (id);
+      if (strcmp (p, "unconstrained") == 0)
+	unconstrained = true;
+      else if (strcmp (p, "reproducible") != 0)
+	{
+	  cp_parser_error (parser, "expected %<reproducible%> or "
+				   "%<unconstrained%>");
+	  goto out_err;
+	}
+      cp_lexer_consume_token (parser->lexer);
+      cp_lexer_consume_token (parser->lexer);
+    }
   if (!cp_lexer_next_token_is (parser->lexer, CPP_NAME))
     {
       cp_parser_error (parser, "expected %<concurrent%>");
@@ -37734,8 +37758,9 @@ cp_parser_omp_clause_order (cp_parser *parser, tree list, location_t location)
   if (!parens.require_close (parser))
     goto out_err;
 
-  /* check_no_duplicate_clause (list, OMP_CLAUSE_ORDER, "order", location); */
+  check_no_duplicate_clause (list, OMP_CLAUSE_ORDER, "order", location);
   c = build_omp_clause (location, OMP_CLAUSE_ORDER);
+  OMP_CLAUSE_ORDER_UNCONSTRAINED (c) = unconstrained;
   OMP_CLAUSE_CHAIN (c) = list;
   return c;
 
@@ -43346,7 +43371,8 @@ cp_parser_omp_cancellation_point (cp_parser *parser, cp_token *pragma_tok,
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
 	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE)	\
-	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
+	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
+	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
 
 static tree
 cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok,
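
The cp/parser.c changes mirror the C parser. Since the same clause parsing backs the C++ attribute syntax exercised by g++.dg/gomp/attrs-1.C, the modifier form should also be usable there; the sketch below is an illustration under that assumption (the attrs tests touched by this patch only add plain order(concurrent)):

    // Sketch only (C++11 attribute spelling, -fopenmp): the order-modifier
    // form here is an assumption based on the shared clause parser, not a
    // test added by this patch.
    void
    cxx_attr_example (int *a)
    {
      [[omp::directive (for simd order(unconstrained : concurrent))]]
      for (int i = 0; i < 128; i++)
        a[i]++;
    }
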
diff --git a/gcc/testsuite/c-c++-common/gomp/clause-dups-1.c b/gcc/testsuite/c-c++-common/gomp/clause-dups-1.c
index 604caf0ba918a6db7c8930337aa1ec5ebeb0c356..a17f68dfb6b293bf16d1fc1aff5d9f4a0f7f4b1f 100644
--- a/gcc/testsuite/c-c++-common/gomp/clause-dups-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/clause-dups-1.c
@@ -27,6 +27,12 @@ f1 (int *p)
   for (i = 0; i < 8; ++i)
     f0 ();
   #pragma omp for nowait nowait					/* { dg-error "too many 'nowait' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp for schedule(static) order(concurrent) order(concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp for schedule(static) order(reproducible:concurrent) order(unconstrained:concurrent)	/* { dg-error "too many 'order' clauses" } */
   for (i = 0; i < 8; ++i)
     f0 ();
   #pragma omp simd collapse(1) collapse(1)			/* { dg-error "too many 'collapse' clauses" } */
@@ -207,6 +213,18 @@ f1 (int *p)
   f0 ();
   #pragma omp scope nowait nowait				/* { dg-error "too many 'nowait' clauses" } */
   ;
+  #pragma omp loop bind(thread) order(concurrent) order(concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp loop bind(thread) order(reproducible:concurrent) order(unconstrained:concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp simd order(concurrent) order(concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp simd order(reproducible:concurrent) order(unconstrained:concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
 }
 
 #pragma omp declare simd simdlen (4) simdlen (4)		/* { dg-error "too many 'simdlen' clauses" } */
@@ -223,3 +241,17 @@ void f6 (int a, int b);
 void f7 (int a, int b);
 #pragma omp declare simd linear (a) uniform (a)			/* { dg-error "'a' appears more than once in data clauses" } */
 void f8 (int a, int b);
+
+#pragma omp declare target
+void
+f9 (void)
+{
+  int i;
+  #pragma omp distribute dist_schedule(static) order(concurrent) order(concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+  #pragma omp loop bind(thread) order(reproducible:concurrent) order(unconstrained:concurrent)	/* { dg-error "too many 'order' clauses" } */
+  for (i = 0; i < 8; ++i)
+    f0 ();
+}
+#pragma omp end declare target
diff --git a/gcc/testsuite/c-c++-common/gomp/clauses-1.c b/gcc/testsuite/c-c++-common/gomp/clauses-1.c
index 378c7bf55a3e112ed313110fef52de5fda86dccf..742132f202eb873f962e0f605428dcec77dc0b8e 100644
--- a/gcc/testsuite/c-c++-common/gomp/clauses-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/clauses-1.c
@@ -66,14 +66,27 @@ baz (int d, int m, int i1, int i2, int p, int *idp, int s,
   #pragma omp distribute parallel for \
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) schedule(static, 4) copyin(t) order(concurrent) allocate (p)
+    lastprivate (l) schedule(static, 4) copyin(t) allocate (p)
+  for (int i = 0; i < 64; i++)
+    ll++;
+  #pragma omp distribute parallel for \
+    private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
+    if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+    lastprivate (l) schedule(static, 4) order(concurrent) allocate (p)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp distribute parallel for simd \
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
     lastprivate (l) schedule(static, 4) nontemporal(ntm) \
-    safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent) allocate (f)
+    safelen(8) simdlen(4) aligned(q: 32) copyin(t) allocate (f)
+  for (int i = 0; i < 64; i++)
+    ll++;
+  #pragma omp distribute parallel for simd \
+    private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
+    if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+    lastprivate (l) schedule(static, 4) nontemporal(ntm) \
+    safelen(8) simdlen(4) aligned(q: 32) order(concurrent) allocate (f)
   for (int i = 0; i < 64; i++)
     ll++;
   #pragma omp distribute simd \
@@ -156,7 +169,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ;
   #pragma omp target teams distribute \
     device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
-    shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+    shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) order(concurrent) \
     collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f) in_reduction(+:r2)
   for (int i = 0; i < 64; i++)
     ;
@@ -218,7 +231,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
   #pragma omp target nowait depend(inout: dd[0]) in_reduction(+:r2)
   #pragma omp teams distribute \
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
-    collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f)
+    collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f) order(concurrent)
   for (int i = 0; i < 64; i++)
     ;
   #pragma omp target
@@ -249,20 +262,36 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ll++;
   #pragma omp teams distribute parallel for \
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
-    collapse(1) dist_schedule(static, 16) order(concurrent) \
+    collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
     lastprivate (l) schedule(static, 4) copyin(t) allocate (f)
   for (int i = 0; i < 64; i++)
     ll++;
+  #pragma omp teams distribute parallel for \
+    private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+    collapse(1) dist_schedule(static, 16) order(concurrent) \
+    if (parallel: i2) num_threads (nth) proc_bind(spread) \
+    lastprivate (l) schedule(static, 4) allocate (f)
+  for (int i = 0; i < 64; i++)
+    ll++;
   #pragma omp teams distribute parallel for simd \
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) \
     if (parallel: i2) num_threads (nth) proc_bind(spread) \
-    lastprivate (l) schedule(static, 4) order(concurrent) \
+    lastprivate (l) schedule(static, 4) \
     safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t) \
     allocate (f)
   for (int i = 0; i < 64; i++)
     ll++;
+  #pragma omp teams distribute parallel for simd \
+    private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+    collapse(1) dist_schedule(static, 16) \
+    if (parallel: i2) num_threads (nth) proc_bind(spread) \
+    lastprivate (l) schedule(static, 4) order(concurrent) \
+    safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) \
+    allocate (f)
+  for (int i = 0; i < 64; i++)
+    ll++;
   #pragma omp teams distribute simd \
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
     collapse(1) dist_schedule(static, 16) order(concurrent) \
diff --git a/gcc/testsuite/c-c++-common/gomp/order-1.c b/gcc/testsuite/c-c++-common/gomp/order-1.c
index da4b73d50869da89c3dc3630160c60c8aa76fb13..547d06190a46ecd04e0e3e81c4e8d284f14f0425 100644
--- a/gcc/testsuite/c-c++-common/gomp/order-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/order-1.c
@@ -27,6 +27,9 @@ f2 (int *a)
   for (i = 0; i < 128; i++)
     a[i]++;
   #pragma omp teams distribute parallel for simd order(concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute order(concurrent)
   for (i = 0; i < 128; i++)
     a[i]++;
   #pragma omp teams
@@ -37,17 +40,11 @@ f2 (int *a)
     #pragma omp distribute parallel for simd order(concurrent)
     for (i = 0; i < 128; i++)
       a[i]++;
+    #pragma omp distribute order(concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
   }
   #pragma omp taskloop simd order (concurrent)
   for (i = 0; i < 128; i++)
     a[i]++;
 }
-
-void
-f3 (int *a)
-{
-  int i;
-  #pragma omp for order(concurrent) order(concurrent) order(concurrent)
-  for (i = 0; i < 128; i++)
-    a[i]++;
-}
diff --git a/gcc/testsuite/c-c++-common/gomp/order-2.c b/gcc/testsuite/c-c++-common/gomp/order-2.c
index 1a9adb09dcb0f71258dcb5fff48531249b178ae5..5e044dc65c6052b608800da8eb9bffcb0c8cb305 100644
--- a/gcc/testsuite/c-c++-common/gomp/order-2.c
+++ b/gcc/testsuite/c-c++-common/gomp/order-2.c
@@ -24,7 +24,7 @@ f2 (int *a)
 {
   int i;
   #pragma omp teams
-  #pragma omp distribute order(concurrent)	/* { dg-error "'order' is not valid for '#pragma omp distribute'" } */
+  #pragma omp distribute order(concurrent)
   for (i = 0; i < 128; i++)
     a[i]++;
   #pragma omp taskloop order (concurrent)	/* { dg-error "'order' is not valid for '#pragma omp taskloop'" } */
diff --git a/gcc/testsuite/c-c++-common/gomp/order-5.c b/gcc/testsuite/c-c++-common/gomp/order-5.c
new file mode 100644
index 0000000000000000000000000000000000000000..17cc8b5ce0f592b854492e8c264c1fac048467c7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/order-5.c
@@ -0,0 +1,101 @@
+void
+f1 (int *a)
+{
+  int i;
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp simd order ( reproducible : concurrent )
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp for simd order(reproducible :concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+}
+
+void
+f2 (int *a)
+{
+  int i;
+  #pragma omp parallel for order(reproducible: concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp parallel for simd order (reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute parallel for order(reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute parallel for simd order(reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute order(reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams
+  {
+    #pragma omp distribute parallel for order(reproducible:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+    #pragma omp distribute parallel for simd order(reproducible:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+    #pragma omp distribute order(reproducible:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+  }
+  #pragma omp taskloop simd order (reproducible:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+}
+
+void
+f3 (int *a)
+{
+  int i;
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp simd order ( unconstrained : concurrent )
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp for simd order(unconstrained :concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+}
+
+void
+f4 (int *a)
+{
+  int i;
+  #pragma omp parallel for order(unconstrained: concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp parallel for simd order (unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute parallel for order(unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute parallel for simd order(unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams distribute order(unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+  #pragma omp teams
+  {
+    #pragma omp distribute parallel for order(unconstrained:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+    #pragma omp distribute parallel for simd order(unconstrained:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+    #pragma omp distribute order(unconstrained:concurrent)
+    for (i = 0; i < 128; i++)
+      a[i]++;
+  }
+  #pragma omp taskloop simd order (unconstrained:concurrent)
+  for (i = 0; i < 128; i++)
+    a[i]++;
+}
diff --git a/gcc/testsuite/c-c++-common/gomp/order-6.c b/gcc/testsuite/c-c++-common/gomp/order-6.c
new file mode 100644
index 0000000000000000000000000000000000000000..2127830e247f56d74f17f17b039ddcc3f0c468c2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/order-6.c
@@ -0,0 +1,412 @@
+void foo (void);
+int v;
+#ifdef __cplusplus
+extern "C" {
+#endif
+int omp_get_thread_num (void);
+int omp_get_num_threads (void);
+int omp_target_is_present (const void *, int);
+int omp_get_cancellation (void);
+#ifdef __cplusplus
+}
+#endif
+
+void
+f1 (int *a)
+{
+  int i;
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f2 (int *a)
+{
+  int i;
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f3 (int *a)
+{
+  int i;
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel
+      foo ();
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp task			/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      a[i]++;
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp taskloop		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(reproducible:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f4 (int *a)
+{
+  int i;
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f5 (int *a)
+{
+  int i;
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f6 (int *a)
+{
+  int i;
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel
+      foo ();
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      a[i] = v;				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];				/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp task			/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      a[i]++;
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp taskloop		/* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      for (j = 0; j < 64; j++)
+	a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(unconstrained:concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();	/* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
diff --git a/gcc/testsuite/g++.dg/gomp/attrs-1.C b/gcc/testsuite/g++.dg/gomp/attrs-1.C
index c871c51728fc3d1888f1519d44ec5cacbf7bb843..2a5f2cf63234415b513c12d4839074311ba9db37 100644
--- a/gcc/testsuite/g++.dg/gomp/attrs-1.C
+++ b/gcc/testsuite/g++.dg/gomp/attrs-1.C
@@ -63,7 +63,7 @@ foo (int d, int m, int i1, int i2, int p, int *idp, int s,
     ll++;
   [[omp::directive (distribute
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
-    allocate (omp_default_mem_alloc:f))]]
+    allocate (omp_default_mem_alloc:f) order(concurrent))]]
   for (int i = 0; i < 64; i++)
     ll++;
 }
@@ -85,14 +85,27 @@ baz (int d, int m, int i1, int i2, int p, int *idp, int s,
   [[omp::directive (distribute parallel for
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
     if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
-    lastprivate (l) schedule(static, 4) copyin(t) order(concurrent) allocate (p))]]
+    lastprivate (l) schedule(static, 4) copyin(t) allocate (p))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
+  [[omp::directive (distribute parallel for
+    private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+    if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+    lastprivate (l) schedule(static, 4) order(concurrent) allocate (p))]]
   for (int i = 0; i < 64; i++)
     ll++;
   [[omp::directive (distribute parallel for simd
     private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
     if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
     lastprivate (l) schedule(static, 4) nontemporal(ntm)
-    safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent) allocate (f))]]
+    safelen(8) simdlen(4) aligned(q: 32) copyin(t) allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
+  [[omp::directive (distribute parallel for simd
+    private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+    if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+    lastprivate (l) schedule(static, 4) nontemporal(ntm)
+    safelen(8) simdlen(4) aligned(q: 32) order(concurrent) allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
   [[omp::directive (distribute simd
@@ -207,7 +220,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ;
   [[omp::sequence (omp::directive (target teams distribute
     device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
-    shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+    shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) order(concurrent)
     collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
   for (int i = 0; i < 64; i++)
     ;
@@ -292,7 +305,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
   [[omp::sequence (directive (target nowait depend(inout: dd[0]) in_reduction(+:r2)),
     directive (teams distribute
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
-    collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f)))]]
+    collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f) order(concurrent)))]]
   for (int i = 0; i < 64; i++)
     ;
   [[omp::directive (teams
@@ -327,20 +340,36 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ll++;
   [[omp::directive (teams distribute parallel for
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
-    collapse(1) dist_schedule(static, 16) order(concurrent)
+    collapse(1) dist_schedule(static, 16)
     if (parallel: i2) num_threads (nth) proc_bind(spread)
     lastprivate (l) schedule(static, 4) copyin(t) allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
+  [[omp::directive (teams distribute parallel for
+    private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+    collapse(1) dist_schedule(static, 16) order(concurrent)
+    if (parallel: i2) num_threads (nth) proc_bind(spread)
+    lastprivate (l) schedule(static, 4) allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
   [[omp::directive (teams distribute parallel for simd
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
     collapse(1) dist_schedule(static, 16)
     if (parallel: i2) num_threads (nth) proc_bind(spread)
-    lastprivate (l) schedule(static, 4) order(concurrent)
+    lastprivate (l) schedule(static, 4)
     safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t)
     allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
+  [[omp::directive (teams distribute parallel for simd
+    private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+    collapse(1) dist_schedule(static, 16)
+    if (parallel: i2) num_threads (nth) proc_bind(spread)
+    lastprivate (l) schedule(static, 4) order(concurrent)
+    safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm)
+    allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
   [[omp::directive (teams distribute simd
     private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
     collapse(1) dist_schedule(static, 16) order(concurrent)
diff --git a/gcc/testsuite/g++.dg/gomp/attrs-2.C b/gcc/testsuite/g++.dg/gomp/attrs-2.C
index 5ec19b32fb0e3b63ab8af416f29d918047feafdb..c00be7f1db71e46bac71cfb2b642e9190dbdd3d8 100644
--- a/gcc/testsuite/g++.dg/gomp/attrs-2.C
+++ b/gcc/testsuite/g++.dg/gomp/attrs-2.C
@@ -63,7 +63,7 @@ foo (int d, int m, int i1, int i2, int p, int *idp, int s,
     ll++;
   [[omp::directive (distribute,
     private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
-    allocate (omp_default_mem_alloc:f))]]
+    allocate (omp_default_mem_alloc:f),order(concurrent))]]
   for (int i = 0; i < 64; i++)
     ll++;
 }
@@ -85,14 +85,27 @@ baz (int d, int m, int i1, int i2, int p, int *idp, int s,
   [[omp::directive (distribute parallel for,
     private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
     if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
-    lastprivate (l),schedule(static, 4),copyin(t),order(concurrent),allocate (p))]]
+    lastprivate (l),schedule(static, 4),copyin(t),allocate (p))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
+  [[omp::directive (distribute parallel for,
+    private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+    if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+    lastprivate (l),schedule(static, 4),order(concurrent),allocate (p))]]
   for (int i = 0; i < 64; i++)
     ll++;
   [[omp::directive (distribute parallel for simd,
     private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
     if (parallel: i2),if(simd: i1),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
     lastprivate (l),schedule(static, 4),nontemporal(ntm),
-    safelen(8),simdlen(4),aligned(q: 32),copyin(t),order(concurrent),allocate (f))]]
+    safelen(8),simdlen(4),aligned(q: 32),copyin(t),allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
+  [[omp::directive (distribute parallel for simd,
+    private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+    if (parallel: i2),if(simd: i1),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+    lastprivate (l),schedule(static, 4),nontemporal(ntm),
+    safelen(8),simdlen(4),aligned(q: 32),order(concurrent),allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
   [[omp::directive (distribute simd,
@@ -207,7 +220,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ;
   [[omp::sequence (omp::directive (target teams distribute,
     device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
-    shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+    shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),order(concurrent),
     collapse(1),dist_schedule(static, 16),nowait depend(inout: dd[0]),allocate (omp_default_mem_alloc:f),in_reduction(+:r2)))]]
   for (int i = 0; i < 64; i++)
     ;
@@ -292,7 +305,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
   [[omp::sequence (directive (target, nowait,depend(inout: dd[0]),in_reduction(+:r2)),
     directive (teams distribute,
     private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
-    collapse(1),dist_schedule(static, 16),allocate (omp_default_mem_alloc: f)))]]
+    collapse(1),dist_schedule(static, 16),allocate (omp_default_mem_alloc: f),order(concurrent)))]]
   for (int i = 0; i < 64; i++)
     ;
   [[omp::directive (teams,
@@ -327,20 +340,36 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
     ll++;
   [[omp::directive (teams distribute parallel for,
     private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
-    collapse(1),dist_schedule(static, 16),order(concurrent),
+    collapse(1),dist_schedule(static, 16),
     if (parallel: i2),num_threads (nth),proc_bind(spread),
     lastprivate (l),schedule(static, 4),copyin(t),allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
+  [[omp::directive (teams distribute parallel for,
+    private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+    collapse(1),dist_schedule(static, 16),order(concurrent),
+    if (parallel: i2),num_threads (nth),proc_bind(spread),
+    lastprivate (l),schedule(static, 4),allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
   [[omp::directive (teams distribute parallel for simd,
     private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
     collapse(1),dist_schedule(static, 16),
     if (parallel: i2),num_threads (nth),proc_bind(spread),
-    lastprivate (l),schedule(static, 4),order(concurrent),
+    lastprivate (l),schedule(static, 4),
     safelen(8),simdlen(4),aligned(q: 32),if (simd: i3),nontemporal(ntm),copyin(t),
     allocate (f))]]
   for (int i = 0; i < 64; i++)
     ll++;
+  [[omp::directive (teams distribute parallel for simd,
+    private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+    collapse(1),dist_schedule(static, 16),
+    if (parallel: i2),num_threads (nth),proc_bind(spread),
+    lastprivate (l),schedule(static, 4),order(concurrent),
+    safelen(8),simdlen(4),aligned(q: 32),if (simd: i3),nontemporal(ntm),
+    allocate (f))]]
+  for (int i = 0; i < 64; i++)
+    ll++;
   [[omp::directive (teams distribute simd,
     private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
     collapse(1),dist_schedule(static, 16),order(concurrent),
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 32225b8bca8bdb0b3ea56e805a5da2bc096aeee6..7de12f33fef8833890f84b4efa4216bd15af7895 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1149,7 +1149,10 @@ dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
       break;
 
     case OMP_CLAUSE_ORDER:
-      pp_string (pp, "order(concurrent)");
+      pp_string (pp, "order(");
+      if (OMP_CLAUSE_ORDER_UNCONSTRAINED (clause))
+	pp_string (pp, "unconstrained:");
+      pp_string (pp, "concurrent)");
       break;
 
     case OMP_CLAUSE_BIND:
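
With the tree-pretty-print.c change, clause dumps distinguish the two stored variants. Because only the unconstrained bit is recorded on the tree, a reproducible modifier prints the same as plain order(concurrent). A small sketch of what to expect in tree dumps (e.g. -fdump-tree-gimple), as an illustration:

    /* Sketch: expected dump_omp_clause output for the two variants.  */
    void
    dump_example (int *a)
    {
      int i;
      #pragma omp for order(unconstrained:concurrent)  /* dumped as order(unconstrained:concurrent) */
      for (i = 0; i < 8; i++)
        a[i]++;
      #pragma omp for order(reproducible:concurrent)   /* dumped as order(concurrent) */
      for (i = 0; i < 8; i++)
        a[i]++;
    }
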
diff --git a/gcc/tree.h b/gcc/tree.h
index 8477f8914cbaca1172643f9df1301304cbb34565..091ad3d3777ab80932e40d1c90c8f86f47fbbcec 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1715,6 +1715,10 @@ class auto_suppress_location_wrappers
 #define OMP_CLAUSE_ORDERED_EXPR(NODE) \
   OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDERED), 0)
 
+/* True for the unconstrained modifier on an order(concurrent) clause.  */
+#define OMP_CLAUSE_ORDER_UNCONSTRAINED(NODE) \
+  (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDER)->base.public_flag)
+
 #define OMP_CLAUSE_REDUCTION_CODE(NODE)	\
   (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
      OMP_CLAUSE_IN_REDUCTION)->omp_clause.subcode.reduction_code)
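
The new tree.h accessor stores the modifier as a flag on the clause node (reusing base.public_flag, subcode-checked to be an OMP_CLAUSE_ORDER). A minimal sketch of how front-end code uses it, mirroring c_parser_omp_clause_order above ("loc", "list" and "unconstrained" stand in for the caller's state):

    /* Sketch only: build an order clause and record the 5.1 modifier,
       as the C and C++ parsers in this patch do.  */
    tree
    build_order_clause (location_t loc, tree list, bool unconstrained)
    {
      tree c = build_omp_clause (loc, OMP_CLAUSE_ORDER);
      OMP_CLAUSE_ORDER_UNCONSTRAINED (c) = unconstrained;
      OMP_CLAUSE_CHAIN (c) = list;
      return c;
    }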