From 22568cc62cabbea1eec20ef65e1da9b0caaa6f02 Mon Sep 17 00:00:00 2001
From: Jakub Jelinek <jakub@redhat.com>
Date: Mon, 4 Dec 2006 11:26:00 +0100
Subject: [PATCH] re PR middle-end/29947 (OpenMP parallel for fails for
 reversed loop range)

	PR libgomp/29947
	* omp-low.c (expand_omp_for_static_nochunk,
	expand_omp_for_static_chunk): Do all arithmetic in signed rather than
	unsigned type.

	* loop.c (gomp_loop_init): Make parameters signed.  Set ws->end to
	start if there shouldn't be any loop iterations.
	(gomp_loop_ordered_static_start): Remove start == end test.
	* testsuite/libgomp.c/pr29947-1.c: New test.
	* testsuite/libgomp.c/pr29947-2.c: New test.

From-SVN: r119485
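
A minimal reproducer for the failure mode, along the lines of the new
tests (a sketch only, not part of the committed testsuite; compile and
link with -fopenmp):

  /* A "reversed" range such as 5 .. 1 with a positive step must execute
     zero iterations.  Before this fix the static-schedule expansion did
     the iteration-count arithmetic in the unsigned variant of the
     induction variable's type, so the negative count of an empty range
     wrapped around to a huge value and threads ran bogus iterations.  */
  extern void abort (void);

  int
  main (void)
  {
    long i, c = 0;
  #pragma omp parallel for reduction (+:c) schedule (static)
    for (i = 5; i <= 1; ++i)	/* expect 0 iterations */
      ++c;
    if (c != 0)
      abort ();
    return 0;
  }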
---
 gcc/ChangeLog                           |   5 +
 gcc/omp-low.c                           |  50 ++--
 libgomp/ChangeLog                       |   9 +
 libgomp/loop.c                          |  12 +-
 libgomp/testsuite/libgomp.c/pr29947-1.c | 328 ++++++++++++++++++++++++
 libgomp/testsuite/libgomp.c/pr29947-2.c | 328 ++++++++++++++++++++++++
 6 files changed, 699 insertions(+), 33 deletions(-)
 create mode 100644 libgomp/testsuite/libgomp.c/pr29947-1.c
 create mode 100644 libgomp/testsuite/libgomp.c/pr29947-2.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d544736b8743..8b65b1630d7c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
 2006-12-04  Jakub Jelinek  <jakub@redhat.com>
 
+	PR libgomp/29947
+	* omp-low.c (expand_omp_for_static_nochunk,
+	expand_omp_for_static_chunk): Do all arithmetic in signed rather than
+	unsigned type.
+
 	PR middle-end/29965
 	* omp-low.c (determine_parallel_type): Don't try to optimize combined
 	parallels if region->exit or region->inner->exit is NULL.
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 30dbc713e8da..f58ee1f1b6f6 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -2771,13 +2771,12 @@ expand_omp_for_static_nochunk (struct omp_region *region,
 			       struct omp_for_data *fd)
 {
   tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
-  tree type, utype, list;
+  tree type, list;
   basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
   basic_block fin_bb;
   block_stmt_iterator si;
 
   type = TREE_TYPE (fd->v);
-  utype = lang_hooks.types.unsigned_type (type);
 
   entry_bb = region->entry;
   seq_start_bb = create_empty_bb (entry_bb);
@@ -2795,12 +2794,12 @@ expand_omp_for_static_nochunk (struct omp_region *region,
 
   t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   nthreads = get_formal_tmp_var (t, &list);
   
   t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   threadid = get_formal_tmp_var (t, &list);
 
   fd->n1 = fold_convert (type, fd->n1);
@@ -2820,25 +2819,25 @@ expand_omp_for_static_nochunk (struct omp_region *region,
   t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
   t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
   t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   if (is_gimple_val (t))
     n = t;
   else
     n = get_formal_tmp_var (t, &list);
 
-  t = build2 (TRUNC_DIV_EXPR, utype, n, nthreads);
+  t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
   q = get_formal_tmp_var (t, &list);
 
-  t = build2 (MULT_EXPR, utype, q, nthreads);
-  t = build2 (NE_EXPR, utype, t, n);
-  t = build2 (PLUS_EXPR, utype, q, t);
+  t = build2 (MULT_EXPR, type, q, nthreads);
+  t = build2 (NE_EXPR, type, t, n);
+  t = build2 (PLUS_EXPR, type, q, t);
   q = get_formal_tmp_var (t, &list);
 
-  t = build2 (MULT_EXPR, utype, q, threadid);
+  t = build2 (MULT_EXPR, type, q, threadid);
   s0 = get_formal_tmp_var (t, &list);
 
-  t = build2 (PLUS_EXPR, utype, s0, q);
-  t = build2 (MIN_EXPR, utype, t, n);
+  t = build2 (PLUS_EXPR, type, s0, q);
+  t = build2 (MIN_EXPR, type, t, n);
   e0 = get_formal_tmp_var (t, &list);
 
   t = build2 (GE_EXPR, boolean_type_node, s0, e0);
@@ -2944,14 +2943,13 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
 {
   tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
   tree trip, nthreads, threadid;
-  tree type, utype;
+  tree type;
   basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
   basic_block trip_update_bb, cont_bb, fin_bb;
   tree list;
   block_stmt_iterator si;
 
   type = TREE_TYPE (fd->v);
-  utype = lang_hooks.types.unsigned_type (type);
 
   entry_bb = region->entry;
   iter_part_bb = create_empty_bb (entry_bb);
@@ -2973,12 +2971,12 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
 
   t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   nthreads = get_formal_tmp_var (t, &list);
   
   t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
   t = build_function_call_expr (t, NULL);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   threadid = get_formal_tmp_var (t, &list);
 
   fd->n1 = fold_convert (type, fd->n1);
@@ -2993,7 +2991,7 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
   if (!is_gimple_val (fd->step))
     fd->step = get_formal_tmp_var (fd->step, &list);
 
-  fd->chunk_size = fold_convert (utype, fd->chunk_size);
+  fd->chunk_size = fold_convert (type, fd->chunk_size);
   if (!is_gimple_val (fd->chunk_size))
     fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
 
@@ -3002,13 +3000,13 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
   t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
   t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
   t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
-  t = fold_convert (utype, t);
+  t = fold_convert (type, t);
   if (is_gimple_val (t))
     n = t;
   else
     n = get_formal_tmp_var (t, &list);
 
-  t = build_int_cst (utype, 0);
+  t = build_int_cst (type, 0);
   trip = get_initialized_tmp_var (t, &list, NULL);
 
   si = bsi_last (entry_bb);
@@ -3019,13 +3017,13 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
   /* Iteration space partitioning goes in ITER_PART_BB.  */
   list = alloc_stmt_list ();
 
-  t = build2 (MULT_EXPR, utype, trip, nthreads);
-  t = build2 (PLUS_EXPR, utype, t, threadid);
-  t = build2 (MULT_EXPR, utype, t, fd->chunk_size);
+  t = build2 (MULT_EXPR, type, trip, nthreads);
+  t = build2 (PLUS_EXPR, type, t, threadid);
+  t = build2 (MULT_EXPR, type, t, fd->chunk_size);
   s0 = get_formal_tmp_var (t, &list);
 
-  t = build2 (PLUS_EXPR, utype, s0, fd->chunk_size);
-  t = build2 (MIN_EXPR, utype, t, n);
+  t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+  t = build2 (MIN_EXPR, type, t, n);
   e0 = get_formal_tmp_var (t, &list);
 
   t = build2 (LT_EXPR, boolean_type_node, s0, n);
@@ -3075,8 +3073,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
   /* Trip update code goes into TRIP_UPDATE_BB.  */
   list = alloc_stmt_list ();
 
-  t = build_int_cst (utype, 1);
-  t = build2 (PLUS_EXPR, utype, trip, t);
+  t = build_int_cst (type, 1);
+  t = build2 (PLUS_EXPR, type, trip, t);
   t = build2 (MODIFY_EXPR, void_type_node, trip, t);
   gimplify_and_add (t, &list);
 
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index 7b19325f2f1d..7d6cce68a983 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1,3 +1,12 @@
+2006-12-04  Jakub Jelinek  <jakub@redhat.com>
+
+	PR libgomp/29947
+	* loop.c (gomp_loop_init): Make parameters signed.  Set ws->end to
+	start if there shouldn't be any loop iterations.
+	(gomp_loop_ordered_static_start): Remove start == end test.
+	* testsuite/libgomp.c/pr29947-1.c: New test.
+	* testsuite/libgomp.c/pr29947-2.c: New test.
+
 2006-12-02  Eric Botcazou  <ebotcazou@libertysurf.fr>
 
 	* configure.tgt: Force initial-exec TLS model on Linux only.
diff --git a/libgomp/loop.c b/libgomp/loop.c
index 3d1b1efaf313..58fd9a8af283 100644
--- a/libgomp/loop.c
+++ b/libgomp/loop.c
@@ -34,13 +34,14 @@
 /* Initialize the given work share construct from the given arguments.  */
 
 static inline void
-gomp_loop_init (struct gomp_work_share *ws, unsigned long start,
-		unsigned long end, unsigned long incr,
-		enum gomp_schedule_type sched, unsigned long chunk_size)
+gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
+		enum gomp_schedule_type sched, long chunk_size)
 {
   ws->sched = sched;
   ws->chunk_size = chunk_size;
-  ws->end = end;
+  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
+  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
+	    ? start : end;
   ws->incr = incr;
   ws->next = start;
 }
@@ -148,9 +149,6 @@ gomp_loop_ordered_static_start (long start, long end, long incr,
 {
   struct gomp_thread *thr = gomp_thread ();
 
-  if (start == end)
-    return false;
-
   if (gomp_work_share_start (true))
     {
       gomp_loop_init (thr->ts.work_share, start, end, incr,
diff --git a/libgomp/testsuite/libgomp.c/pr29947-1.c b/libgomp/testsuite/libgomp.c/pr29947-1.c
new file mode 100644
index 000000000000..78b40ac5ae5a
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/pr29947-1.c
@@ -0,0 +1,328 @@
+/* PR libgomp/29947 */
+/* { dg-options "-O2 -fopenmp" } */
+/* { dg-do run } */
+
+extern void abort (void);
+
+int cnt;
+
+void
+test1 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test2 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test3 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test4 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test5 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test6 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (dynamic) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test7 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test8 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (guided) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test9 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test10 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test11 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test12 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test13 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test14 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test15 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test16 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+int
+__attribute__((noinline))
+test (long j1, long k1, long j2, long k2)
+{
+  test1 (j1, k1, j2, k2);
+  test2 (j1, k1, j2, k2);
+  test3 (j1, k1, j2, k2);
+  test4 (j1, k1, j2, k2);
+  test5 (j1, k1, j2, k2);
+  test6 (j1, k1, j2, k2);
+  test7 (j1, k1, j2, k2);
+  test8 (j1, k1, j2, k2);
+  test9 (j1, k1, j2, k2);
+  test10 (j1, k1, j2, k2);
+  test11 (j1, k1, j2, k2);
+  test12 (j1, k1, j2, k2);
+  test13 (j1, k1, j2, k2);
+  test14 (j1, k1, j2, k2);
+  test15 (j1, k1, j2, k2);
+  test16 (j1, k1, j2, k2);
+  return cnt;
+}
+
+int
+main (void)
+{
+  test (1, 5, 1, 5);
+  test (5, 5, 5, 5);
+  test (5, 4, 5, 4);
+  test (5, 1, 5, 1);
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/pr29947-2.c b/libgomp/testsuite/libgomp.c/pr29947-2.c
new file mode 100644
index 000000000000..231cd5d27217
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/pr29947-2.c
@@ -0,0 +1,328 @@
+/* PR libgomp/29947 */
+/* { dg-options "-O2 -fopenmp" } */
+/* { dg-do run } */
+
+extern void abort (void);
+
+int cnt;
+
+void
+test1 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test2 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test3 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1)
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test4 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1)
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test5 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test6 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test7 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1) ordered
+    for (i = j1; i <= k1; ++i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test8 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel reduction (+:e,c)
+  {
+#pragma omp for schedule (static, 1) ordered
+    for (i = k1; i >= j1; --i)
+      {
+	if (i < j2 || i > k2)
+	  ++e;
+#pragma omp ordered
+	++c;
+      }
+#pragma omp atomic
+    ++cnt;
+  }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test9 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test10 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test11 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test12 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test13 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test14 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test15 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
+  for (i = j1; i <= k1; ++i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+void
+test16 (long j1, long k1, long j2, long k2)
+{
+  long i, e = 0, c = 0;
+#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
+  for (i = k1; i >= j1; --i)
+    {
+      if (i < j2 || i > k2)
+	++e;
+#pragma omp ordered
+      ++c;
+    }
+  if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
+    abort ();
+}
+
+int
+__attribute__((noinline))
+test (long j1, long k1, long j2, long k2)
+{
+  test1 (j1, k1, j2, k2);
+  test2 (j1, k1, j2, k2);
+  test3 (j1, k1, j2, k2);
+  test4 (j1, k1, j2, k2);
+  test5 (j1, k1, j2, k2);
+  test6 (j1, k1, j2, k2);
+  test7 (j1, k1, j2, k2);
+  test8 (j1, k1, j2, k2);
+  test9 (j1, k1, j2, k2);
+  test10 (j1, k1, j2, k2);
+  test11 (j1, k1, j2, k2);
+  test12 (j1, k1, j2, k2);
+  test13 (j1, k1, j2, k2);
+  test14 (j1, k1, j2, k2);
+  test15 (j1, k1, j2, k2);
+  test16 (j1, k1, j2, k2);
+  return cnt;
+}
+
+int
+main (void)
+{
+  test (1, 5, 1, 5);
+  test (5, 5, 5, 5);
+  test (5, 4, 5, 4);
+  test (5, 1, 5, 1);
+  return 0;
+}
-- 
GitLab