diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index 6e73ac706ac6e215be096ebed81bfe9e6375313c..25ade1300aff401cb2c548a9f70ab87cf3271bff 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -75,7 +75,7 @@ ssa_block_ranges::dump (FILE *f)
 class sbr_vector : public ssa_block_ranges
 {
 public:
-  sbr_vector (tree t, irange_allocator *allocator);
+  sbr_vector (tree t, vrange_allocator *allocator);
 
   virtual bool set_bb_range (const_basic_block bb, const irange &r) override;
   virtual bool get_bb_range (irange &r, const_basic_block bb) override;
@@ -86,20 +86,21 @@ protected:
   int_range<2> m_varying;
   int_range<2> m_undefined;
   tree m_type;
-  irange_allocator *m_irange_allocator;
+  vrange_allocator *m_range_allocator;
   void grow ();
 };
 
 
 // Initialize a block cache for an ssa_name of type T.
 
-sbr_vector::sbr_vector (tree t, irange_allocator *allocator)
+sbr_vector::sbr_vector (tree t, vrange_allocator *allocator)
 {
   gcc_checking_assert (TYPE_P (t));
   m_type = t;
-  m_irange_allocator = allocator;
+  m_range_allocator = allocator;
   m_tab_size = last_basic_block_for_fn (cfun) + 1;
-  m_tab = (irange **)allocator->get_memory (m_tab_size * sizeof (irange *));
+  m_tab = static_cast <irange **>
+    (allocator->alloc (m_tab_size * sizeof (irange *)));
   memset (m_tab, 0, m_tab_size * sizeof (irange *));
 
   // Create the cached type range.
@@ -121,8 +122,8 @@ sbr_vector::grow ()
   int new_size = inc + curr_bb_size;
 
   // Allocate new memory, copy the old vector and clear the new space.
-  irange **t = (irange **)m_irange_allocator->get_memory (new_size
-							  * sizeof (irange *));
+  irange **t = static_cast <irange **>
+    (m_range_allocator->alloc (new_size * sizeof (irange *)));
   memcpy (t, m_tab, m_tab_size * sizeof (irange *));
   memset (t + m_tab_size, 0, (new_size - m_tab_size) * sizeof (irange *));
 
@@ -143,7 +144,7 @@ sbr_vector::set_bb_range (const_basic_block bb, const irange &r)
   else if (r.undefined_p ())
     m = &m_undefined;
   else
-    m = m_irange_allocator->allocate (r);
+    m = m_range_allocator->clone (r);
   m_tab[bb->index] = m;
   return true;
 }
@@ -191,14 +192,14 @@ sbr_vector::bb_range_p (const_basic_block bb)
 class sbr_sparse_bitmap : public ssa_block_ranges
 {
 public:
-  sbr_sparse_bitmap (tree t, irange_allocator *allocator, bitmap_obstack *bm);
+  sbr_sparse_bitmap (tree t, vrange_allocator *allocator, bitmap_obstack *bm);
   virtual bool set_bb_range (const_basic_block bb, const irange &r) override;
   virtual bool get_bb_range (irange &r, const_basic_block bb) override;
   virtual bool bb_range_p (const_basic_block bb) override;
 private:
   void bitmap_set_quad (bitmap head, int quad, int quad_value);
   int bitmap_get_quad (const_bitmap head, int quad);
-  irange_allocator *m_irange_allocator;
+  vrange_allocator *m_range_allocator;
   irange *m_range[SBR_NUM];
   bitmap_head bitvec;
   tree m_type;
@@ -206,23 +207,25 @@ private:
 
 // Initialize a block cache for an ssa_name of type T.
 
-sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, irange_allocator *allocator,
-				bitmap_obstack *bm)
+sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, vrange_allocator *allocator,
+				      bitmap_obstack *bm)
 {
   gcc_checking_assert (TYPE_P (t));
   m_type = t;
   bitmap_initialize (&bitvec, bm);
   bitmap_tree_view (&bitvec);
-  m_irange_allocator = allocator;
+  m_range_allocator = allocator;
   // Pre-cache varying.
-  m_range[0] = m_irange_allocator->allocate (2);
+  m_range[0] = static_cast <irange *> (m_range_allocator->alloc_vrange (t));
   m_range[0]->set_varying (t);
   // Pre-cache zero and non-zero values for pointers.
   if (POINTER_TYPE_P (t))
     {
-      m_range[1] = m_irange_allocator->allocate (2);
+      m_range[1]
+	= static_cast <irange *> (m_range_allocator->alloc_vrange (t));
       m_range[1]->set_nonzero (t);
-      m_range[2] = m_irange_allocator->allocate (2);
+      m_range[2]
+	= static_cast <irange *> (m_range_allocator->alloc_vrange (t));
       m_range[2]->set_zero (t);
     }
   else
@@ -267,7 +270,7 @@ sbr_sparse_bitmap::set_bb_range (const_basic_block bb, const irange &r)
     if (!m_range[x] || r == *(m_range[x]))
       {
 	if (!m_range[x])
-	  m_range[x] = m_irange_allocator->allocate (r);
+	  m_range[x] = m_range_allocator->clone (r);
 	bitmap_set_quad (&bitvec, bb->index, x + 1);
 	return true;
       }
@@ -312,14 +315,14 @@ block_range_cache::block_range_cache ()
   bitmap_obstack_initialize (&m_bitmaps);
   m_ssa_ranges.create (0);
   m_ssa_ranges.safe_grow_cleared (num_ssa_names);
-  m_irange_allocator = new irange_allocator;
+  m_range_allocator = new vrange_allocator;
 }
 
 // Remove any m_block_caches which have been created.
 
 block_range_cache::~block_range_cache ()
 {
-  delete m_irange_allocator;
+  delete m_range_allocator;
   // Release the vector itself.
   m_ssa_ranges.release ();
   bitmap_obstack_release (&m_bitmaps);
@@ -341,17 +344,17 @@ block_range_cache::set_bb_range (tree name, const_basic_block bb,
       // Use sparse representation if there are too many basic blocks.
       if (last_basic_block_for_fn (cfun) > param_evrp_sparse_threshold)
 	{
-	  void *r = m_irange_allocator->get_memory (sizeof (sbr_sparse_bitmap));
+	  void *r = m_range_allocator->alloc (sizeof (sbr_sparse_bitmap));
 	  m_ssa_ranges[v] = new (r) sbr_sparse_bitmap (TREE_TYPE (name),
-						       m_irange_allocator,
+						       m_range_allocator,
 						       &m_bitmaps);
 	}
       else
 	{
 	  // Otherwise use the default vector implemntation.
-	  void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
+	  void *r = m_range_allocator->alloc (sizeof (sbr_vector));
 	  m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
-						m_irange_allocator);
+						m_range_allocator);
 	}
     }
   return m_ssa_ranges[v]->set_bb_range (bb, r);
@@ -467,7 +470,7 @@ block_range_cache::dump (FILE *f, basic_block bb, bool print_varying)
 ssa_global_cache::ssa_global_cache ()
 {
   m_tab.create (0);
-  m_irange_allocator = new irange_allocator;
+  m_range_allocator = new vrange_allocator;
 }
 
 // Deconstruct a global cache.
@@ -475,7 +478,7 @@ ssa_global_cache::ssa_global_cache ()
 ssa_global_cache::~ssa_global_cache ()
 {
   m_tab.release ();
-  delete m_irange_allocator;
+  delete m_range_allocator;
 }
 
 // Retrieve the global range of NAME from cache memory if it exists. 
@@ -509,7 +512,7 @@ ssa_global_cache::set_global_range (tree name, const irange &r)
   if (m && m->fits_p (r))
     *m = r;
   else
-    m_tab[v] = m_irange_allocator->allocate (r);
+    m_tab[v] = m_range_allocator->clone (r);
   return m != NULL;
 }
 
diff --git a/gcc/gimple-range-cache.h b/gcc/gimple-range-cache.h
index d56e56c201cf9c53b65dd96c3994fc4c5c98200a..73d12f35abe8200cfaed80144ef918edda9eb933 100644
--- a/gcc/gimple-range-cache.h
+++ b/gcc/gimple-range-cache.h
@@ -44,7 +44,7 @@ private:
   vec<class ssa_block_ranges *> m_ssa_ranges;
   ssa_block_ranges &get_block_ranges (tree name);
   ssa_block_ranges *query_block_ranges (tree name);
-  irange_allocator *m_irange_allocator;
+  vrange_allocator *m_range_allocator;
   bitmap_obstack m_bitmaps;
 };
 
@@ -64,7 +64,7 @@ public:
   void dump (FILE *f = stderr);
 private:
   vec<irange *> m_tab;
-  class irange_allocator *m_irange_allocator;
+  vrange_allocator *m_range_allocator;
 };
 
 // This class provides all the caches a global ranger may need, and makes 
diff --git a/gcc/gimple-range-edge.cc b/gcc/gimple-range-edge.cc
index 5bbe23ae03d9cb0b759a98a15384a815aa6fbf88..5264e627c9a5993fb618d5fc4e5167b4ba3bc50d 100644
--- a/gcc/gimple-range-edge.cc
+++ b/gcc/gimple-range-edge.cc
@@ -166,13 +166,13 @@ gimple_outgoing_range::calc_switch_ranges (gswitch *sw)
       // If there was an existing range and it doesn't fit, we lose the memory.
       // It'll get reclaimed when the obstack is freed.  This seems less
       // intrusive than allocating max ranges for each case.
-      slot = m_range_allocator.allocate (case_range);
+      slot = m_range_allocator.clone <irange> (case_range);
     }
 
   irange *&slot = m_edge_table->get_or_insert (default_edge, &existed);
   // This should be the first call into this switch.
   gcc_checking_assert (!existed);
-  irange *dr = m_range_allocator.allocate (default_range);
+  irange *dr = m_range_allocator.clone <irange> (default_range);
   slot = dr;
 }
 
diff --git a/gcc/gimple-range-edge.h b/gcc/gimple-range-edge.h
index c131b3309ccfbce0b42f73dea6f845ef95c8a8a0..ce383b0aa6fd1e99841388bb0db9a38c89fd3712 100644
--- a/gcc/gimple-range-edge.h
+++ b/gcc/gimple-range-edge.h
@@ -47,7 +47,7 @@ private:
 
   int m_max_edges;
   hash_map<edge, irange *> *m_edge_table;
-  irange_allocator m_range_allocator;
+  vrange_allocator m_range_allocator;
 };
 
 // If there is a range control statement at the end of block BB, return it.
diff --git a/gcc/gimple-range-infer.cc b/gcc/gimple-range-infer.cc
index 545d4f2de3d562675014e1c2bd65568f96d36d86..14ddfb803d81b427f5742b220d9b49e36524818d 100644
--- a/gcc/gimple-range-infer.cc
+++ b/gcc/gimple-range-infer.cc
@@ -189,8 +189,10 @@ infer_range_manager::get_nonzero (tree name)
     m_nonzero.safe_grow_cleared (num_ssa_names + 20);
   if (!m_nonzero[v])
     {
-      m_nonzero[v] = m_range_allocator.allocate (2);
-      m_nonzero[v]->set_nonzero (TREE_TYPE (name));
+      tree type = TREE_TYPE (name);
+      m_nonzero[v]
+	= static_cast <irange *> (m_range_allocator.alloc_vrange (type));
+      m_nonzero[v]->set_nonzero (type);
     }
   return *(m_nonzero[v]);
 }
@@ -259,14 +261,17 @@ infer_range_manager::add_range (tree name, basic_block bb, const irange &r)
       if (ptr->range->fits_p (cur))
 	*(ptr->range) = cur;
       else
-	ptr->range = m_range_allocator.allocate (cur);
+	{
+	  vrange &v = cur;
+	  ptr->range = static_cast <irange *> (m_range_allocator.clone (v));
+	}
       return;
     }
 
   // Otherwise create a record.
   bitmap_set_bit (m_on_exit[bb->index].m_names, SSA_NAME_VERSION (name));
   ptr = (exit_range *)obstack_alloc (&m_list_obstack, sizeof (exit_range));
-  ptr->range = m_range_allocator.allocate (r);
+  ptr->range = m_range_allocator.clone (r);
   ptr->name = name;
   ptr->next = m_on_exit[bb->index].head;
   m_on_exit[bb->index].head = ptr;
diff --git a/gcc/gimple-range-infer.h b/gcc/gimple-range-infer.h
index 412958fe28ea313557c79cfedf1538694d950715..65f6e83809d03e11369c29b038fe434094445bc4 100644
--- a/gcc/gimple-range-infer.h
+++ b/gcc/gimple-range-infer.h
@@ -78,7 +78,7 @@ private:
   bitmap m_seen;
   bitmap_obstack m_bitmaps;
   struct obstack m_list_obstack;
-  irange_allocator m_range_allocator;
+  vrange_allocator m_range_allocator;
 };
 
 #endif // GCC_GIMPLE_RANGE_SIDE_H
diff --git a/gcc/value-range.h b/gcc/value-range.h
index b7ea8c76f87490c1180007dab0b46e6c85ee7b73..5cd0e0ef76a7b6faeb33c9a6b949f289f25aff3e 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -92,7 +92,7 @@ protected:
 
 class GTY((user)) irange : public vrange
 {
-  friend class irange_allocator;
+  friend class vrange_allocator;
 public:
   // In-place setters.
   virtual void set (tree, tree, value_range_kind = VR_RANGE) override;
@@ -897,56 +897,63 @@ vrp_val_min (const_tree type)
   return NULL_TREE;
 }
 
-// This is the irange storage class.  It is used to allocate the
-// minimum amount of storage needed for a given irange.  Storage is
-// automatically freed at destruction of the storage class.
-//
-// It is meant for long term storage, as opposed to int_range_max
-// which is meant for intermediate temporary results on the stack.
-//
-// The newly allocated irange is initialized to the empty set
-// (undefined_p() is true).
+// This is the range storage class.  It is used to allocate the
+// minimum amount of storage needed for a given range.  Storage is
+// automatically freed at destruction of the class.
 
-class irange_allocator
+class vrange_allocator
 {
 public:
-  irange_allocator ();
-  ~irange_allocator ();
-  // Return a new range with NUM_PAIRS.
-  irange *allocate (unsigned num_pairs);
-  // Return a copy of SRC with the minimum amount of sub-ranges needed
-  // to represent it.
-  irange *allocate (const irange &src);
-  void *get_memory (unsigned num_bytes);
+  vrange_allocator ();
+  ~vrange_allocator ();
+  // Allocate a range of TYPE.
+  vrange *alloc_vrange (tree type);
+  // Allocate a memory block of BYTES.
+  void *alloc (unsigned bytes);
+  // Return a clone of SRC.
+  template <typename T> T *clone (const T &src);
 private:
-  DISABLE_COPY_AND_ASSIGN (irange_allocator);
+  irange *alloc_irange (unsigned pairs);
+  DISABLE_COPY_AND_ASSIGN (vrange_allocator);
   struct obstack m_obstack;
 };
 
 inline
-irange_allocator::irange_allocator ()
+vrange_allocator::vrange_allocator ()
 {
   obstack_init (&m_obstack);
 }
 
 inline
-irange_allocator::~irange_allocator ()
+vrange_allocator::~vrange_allocator ()
 {
   obstack_free (&m_obstack, NULL);
 }
 
 // Provide a hunk of memory from the obstack.
+
 inline void *
-irange_allocator::get_memory (unsigned num_bytes)
+vrange_allocator::alloc (unsigned bytes)
 {
-  void *r = obstack_alloc (&m_obstack, num_bytes);
-  return r;
+  return obstack_alloc (&m_obstack, bytes);
+}
+
+// Return a new range to hold ranges of TYPE.  The newly allocated
+// range is initialized to VR_UNDEFINED.
+
+inline vrange *
+vrange_allocator::alloc_vrange (tree type)
+{
+  if (irange::supports_type_p (type))
+    return alloc_irange (2);
+
+  gcc_unreachable ();
 }
 
 // Return a new range with NUM_PAIRS.
 
 inline irange *
-irange_allocator::allocate (unsigned num_pairs)
+vrange_allocator::alloc_irange (unsigned num_pairs)
 {
   // Never allocate 0 pairs.
   // Don't allocate 1 either, or we get legacy value_range's.
@@ -956,17 +963,32 @@ irange_allocator::allocate (unsigned num_pairs)
   size_t nbytes = sizeof (tree) * 2 * num_pairs;
 
   // Allocate the irange and required memory for the vector.
-  void *r = obstack_alloc (&m_obstack, sizeof (irange));
-  tree *mem = (tree *) obstack_alloc (&m_obstack, nbytes);
+  void *r = alloc (sizeof (irange));
+  tree *mem = static_cast <tree *> (alloc (nbytes));
   return new (r) irange (mem, num_pairs);
 }
 
+// Return a clone of an irange.
+
+template <>
 inline irange *
-irange_allocator::allocate (const irange &src)
+vrange_allocator::clone <irange> (const irange &src)
 {
-  irange *r = allocate (src.num_pairs ());
+  irange *r = alloc_irange (src.num_pairs ());
   *r = src;
   return r;
 }
 
+// Return a clone of a vrange.
+
+template <>
+inline vrange *
+vrange_allocator::clone <vrange> (const vrange &src)
+{
+  if (is_a <irange> (src))
+    return clone <irange> (as_a <irange> (src));
+
+  gcc_unreachable ();
+}
+
 #endif // GCC_VALUE_RANGE_H
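
Usage sketch (illustrative note, not part of the patch): the new
vrange_allocator keeps the obstack lifetime model of irange_allocator,
but callers now pick the entry point by intent -- alloc_vrange () for a
fresh range of a given type, clone () for a long-lived copy of an
existing range, and alloc () plus placement new for cache objects, the
pattern block_range_cache::set_bb_range uses above.  The names "type",
"tmp", "v", "stored" and "r2" below are made-up placeholders; this is a
sketch of the API as it appears in the diff, not code lifted from the
tree.

  vrange_allocator alloc;

  // Fresh range for a supported type; unsupported types currently hit
  // gcc_unreachable () inside alloc_vrange ().
  irange *v = static_cast <irange *> (alloc.alloc_vrange (type));
  v->set_varying (type);

  // Long-term copy of a stack temporary, trimmed to the number of
  // sub-range pairs it actually needs.
  int_range_max tmp;
  tmp.set_nonzero (type);
  irange *stored = alloc.clone <irange> (tmp);

  // Raw obstack memory plus placement new, as alloc_irange () and
  // block_range_cache::set_bb_range () do internally.
  void *mem = alloc.alloc (sizeof (int_range<2>));
  irange *r2 = new (mem) int_range<2> ();

  // Everything allocated above is released when "alloc" is destroyed.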