PPL  1.2
Bit_Row.cc
Go to the documentation of this file.
1 /* Bit_Row class implementation (non-inline functions).
2  Copyright (C) 2001-2010 Roberto Bagnara <bagnara@cs.unipr.it>
3  Copyright (C) 2010-2016 BUGSENG srl (http://bugseng.com)
4 
5 This file is part of the Parma Polyhedra Library (PPL).
6 
7 The PPL is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3 of the License, or (at your
10 option) any later version.
11 
12 The PPL is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software Foundation,
19 Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307, USA.
20 
21 For the most up-to-date information see the Parma Polyhedra Library
22 site: http://bugseng.com/products/ppl/ . */
23 
24 #include "ppl-config.h"
25 #include "Bit_Row_defs.hh"
26 #include "assertions.hh"
27 #include "C_Integer.hh"
28 
29 namespace PPL = Parma_Polyhedra_Library;
30 
31 unsigned long
33  const mp_size_t vec_size = vec->_mp_size;
34  PPL_ASSERT(vec_size >= 0);
35  mp_srcptr p = vec->_mp_d;
36  for (mp_size_t li = 0; li < vec_size; ++li, ++p) {
37  const mp_limb_t limb = *p;
38  if (limb != 0) {
39  return static_cast<unsigned long>(li) * PPL_BITS_PER_GMP_LIMB
41  }
42  }
44 }
45 
46 unsigned long
47 PPL::Bit_Row::next(unsigned long position) const {
48  ++position;
49 
50  /*
51  The alternative implementation using the mpz_scan1() function
52  of GMP was measured to be slower that ours. Here it is, in
53  case mpz_scan1() is improved:
54 
55  <CODE>
56  unsigned long r = mpz_scan1(vec, position);
57  return (r == C_Integer<unsigned long>::max) ? -1 : r;
58  </CODE>
59  */
60 
61  const unsigned long uli = position / PPL_BITS_PER_GMP_LIMB;
62  mp_size_t li = static_cast<mp_size_t>(uli);
63  const mp_size_t vec_size = vec->_mp_size;
64  PPL_ASSERT(vec_size >= 0);
65  if (li >= vec_size) {
67  }
68 
69  // Get the first limb.
70  mp_srcptr p = vec->_mp_d + li;
71 
72  // Mask off any bits before `position' in the first limb.
73  mp_limb_t limb
74  = *p & ((~static_cast<mp_limb_t>(0)) << (position % PPL_BITS_PER_GMP_LIMB));
75 
76  while (true) {
77  if (limb != 0) {
78  return static_cast<unsigned long>(li) * PPL_BITS_PER_GMP_LIMB
80  }
81  ++li;
82  if (li == vec_size) {
83  break;
84  }
85  ++p;
86  limb = *p;
87  }
89 }
90 
91 unsigned long
93  mp_size_t li = vec->_mp_size;
94  PPL_ASSERT(li >= 0);
95  if (li == 0) {
97  }
98  --li;
99  const mp_srcptr p = vec->_mp_d + li;
100  const mp_limb_t limb = *p;
101  PPL_ASSERT(limb != 0);
102  return static_cast<unsigned long>(li) * PPL_BITS_PER_GMP_LIMB
103  + Implementation::last_one(limb);
104 }
105 
106 unsigned long
107 PPL::Bit_Row::prev(unsigned long position) const {
108  if (position == 0) {
110  }
111 
112  --position;
113 
114  const mp_size_t vec_size = vec->_mp_size;
115  PPL_ASSERT(vec_size > 0);
116  const unsigned long uli = position / PPL_BITS_PER_GMP_LIMB;
117  mp_size_t li = static_cast<mp_size_t>(uli);
118 
119  mp_limb_t limb;
120  mp_srcptr p = vec->_mp_d;
121 
122  // Get the first limb.
123  if (li >= vec_size) {
124  li = vec_size - 1;
125  p += li;
126  limb = *p;
127  }
128  else {
129  const mp_limb_t mask
130  = (~static_cast<mp_limb_t>(0))
131  >> (PPL_BITS_PER_GMP_LIMB - 1U - position % PPL_BITS_PER_GMP_LIMB);
132  p += li;
133  limb = *p & mask;
134  }
135 
136  while (true) {
137  if (limb != 0) {
138  return static_cast<unsigned long>(li) * PPL_BITS_PER_GMP_LIMB
139  + Implementation::last_one(limb);
140  }
141  if (li == 0) {
142  break;
143  }
144  --li;
145  --p;
146  limb = *p;
147  }
149 }
150 
151 bool
152 PPL::Bit_Row::operator[](const unsigned long k) const {
153  const mp_size_t vec_size = vec->_mp_size;
154  PPL_ASSERT(vec_size >= 0);
155 
156  const unsigned long i = k / static_cast<unsigned long>(GMP_NUMB_BITS);
157  if (i >= static_cast<unsigned long>(vec_size)) {
158  return false;
159  }
160 
161  const mp_limb_t limb = *(vec->_mp_d + i);
162  return ((limb >> (k % static_cast<unsigned long>(GMP_NUMB_BITS))) & 1U) != 0;
163 }
164 
165 void
166 PPL::Bit_Row::set_until(unsigned long k) {
167  // FIXME, TODO: this is an inefficient implementation.
168  while (k-- > 0) {
169  mpz_setbit(vec, k);
170  }
171 }
172 
174 int
175 PPL::compare(const Bit_Row& x, const Bit_Row& y) {
176  const mp_size_t x_size = x.vec->_mp_size;
177  PPL_ASSERT(x_size >= 0);
178  const mp_size_t y_size = y.vec->_mp_size;
179  PPL_ASSERT(y_size >= 0);
180  mp_size_t size = ((x_size > y_size) ? y_size : x_size);
181  mp_srcptr xp = x.vec->_mp_d;
182  mp_srcptr yp = y.vec->_mp_d;
183  while (size > 0) {
184  const mp_limb_t xl = *xp;
185  const mp_limb_t yl = *yp;
186  if (xl != yl) {
187  // Get the ones where they are different.
188  const mp_limb_t diff = xl ^ yl;
189  // First bit that is different.
190  const mp_limb_t mask = diff & ~(diff-1);
191  return ((xl & mask) != 0) ? 1 : -1;
192  }
193  ++xp;
194  ++yp;
195  --size;
196  }
197  return (x_size == y_size) ? 0 : ((x_size > y_size) ? 1 : -1);
198 }
199 
201 bool
202 PPL::subset_or_equal(const Bit_Row& x, const Bit_Row& y) {
203  mp_size_t x_size = x.vec->_mp_size;
204  PPL_ASSERT(x_size >= 0);
205  const mp_size_t y_size = y.vec->_mp_size;
206  PPL_ASSERT(y_size >= 0);
207  if (x_size > y_size) {
208  return false;
209  }
210  mp_srcptr xp = x.vec->_mp_d;
211  mp_srcptr yp = y.vec->_mp_d;
212  while (x_size > 0) {
213  if ((*xp & ~*yp) != 0) {
214  return false;
215  }
216  ++xp;
217  ++yp;
218  --x_size;
219  }
220  return true;
221 }
222 
224 bool
225 PPL::subset_or_equal(const Bit_Row& x, const Bit_Row& y,
226  bool& strict_subset) {
227  mp_size_t x_size = x.vec->_mp_size;
228  PPL_ASSERT(x_size >= 0);
229  const mp_size_t y_size = y.vec->_mp_size;
230  PPL_ASSERT(y_size >= 0);
231  if (x_size > y_size) {
232  return false;
233  }
234  mp_srcptr xp = x.vec->_mp_d;
235  mp_srcptr yp = y.vec->_mp_d;
236  strict_subset = (x_size < y_size);
237  mp_limb_t xl;
238  mp_limb_t yl;
239  if (strict_subset) {
240  while (x_size > 0) {
241  xl = *xp;
242  yl = *yp;
243  if ((xl & ~yl) != 0) {
244  return false;
245  }
246  strict_subset_next:
247  ++xp;
248  ++yp;
249  --x_size;
250  }
251  }
252  else {
253  while (x_size > 0) {
254  xl = *xp;
255  yl = *yp;
256  if (xl != yl) {
257  if ((xl & ~yl) != 0) {
258  return false;
259  }
260  strict_subset = true;
261  goto strict_subset_next;
262  }
263  ++xp;
264  ++yp;
265  --x_size;
266  }
267  }
268  return true;
269 }
270 
272 bool
273 PPL::strict_subset(const Bit_Row& x, const Bit_Row& y) {
274  mp_size_t x_size = x.vec->_mp_size;
275  PPL_ASSERT(x_size >= 0);
276  const mp_size_t y_size = y.vec->_mp_size;
277  PPL_ASSERT(y_size >= 0);
278  if (x_size > y_size) {
279  return false;
280  }
281  bool different = (x_size < y_size);
282  mp_srcptr xp = x.vec->_mp_d;
283  mp_srcptr yp = y.vec->_mp_d;
284  while (x_size > 0) {
285  const mp_limb_t xl = *xp;
286  const mp_limb_t yl = *yp;
287  if ((xl & ~yl) != 0) {
288  return false;
289  }
290  if (!different && xl != yl) {
291  different = true;
292  }
293  ++xp;
294  ++yp;
295  --x_size;
296  }
297  return different;
298 }
299 
301 bool
302 PPL::operator==(const Bit_Row& x, const Bit_Row& y) {
303  const mp_size_t x_vec_size = x.vec->_mp_size;
304  PPL_ASSERT(x_vec_size >= 0);
305  const mp_size_t y_vec_size = y.vec->_mp_size;
306  PPL_ASSERT(y_vec_size >= 0);
307 
308  if (x_vec_size != y_vec_size) {
309  return false;
310  }
311 
312  return mpn_cmp(x.vec->_mp_d, y.vec->_mp_d, x_vec_size) == 0;
313 }
314 
316 bool
317 PPL::operator!=(const Bit_Row& x, const Bit_Row& y) {
318  const mp_size_t x_vec_size = x.vec->_mp_size;
319  PPL_ASSERT(x_vec_size >= 0);
320  const mp_size_t y_vec_size = y.vec->_mp_size;
321  PPL_ASSERT(y_vec_size >= 0);
322 
323  if (x_vec_size != y_vec_size) {
324  return true;
325  }
326 
327  return mpn_cmp(x.vec->_mp_d, y.vec->_mp_d, x_vec_size) != 0;
328 }
329 
330 bool
332  const mp_size_t vec_size = vec->_mp_size;
333  const mp_size_t vec_alloc = vec->_mp_alloc;
334  return vec_size >= 0
335  && vec_alloc >= vec_size
336  && (vec_size == 0 || mpz_getlimbn(vec, vec_size-1) != 0);
337 }
338 
339 void
341  mp_size_t y_size = y.vec->_mp_size;
342  mp_size_t z_size = z.vec->_mp_size;
343  PPL_ASSERT(y_size <= z_size);
344  PPL_ASSERT(vec->_mp_alloc >= z_size);
345  vec->_mp_size = z.vec->_mp_size;
346  mp_srcptr yp = y.vec->_mp_d;
347  mp_srcptr zp = z.vec->_mp_d;
348  mp_ptr p = vec->_mp_d;
349  z_size -= y_size;
350  while (y_size > 0) {
351  *p = *yp | * zp;
352  ++yp;
353  ++zp;
354  ++p;
355  --y_size;
356  }
357  while (z_size > 0) {
358  *p = *zp;
359  ++zp;
360  ++p;
361  --z_size;
362  }
363 }
void set_until(unsigned long k)
Sets bits up to position k (excluded).
Definition: Bit_Row.cc:166
bool operator!=(const Bit_Row &x, const Bit_Row &y)
Returns true if and only if x and y are different. Definition: Bit_Row.cc
bool OK() const
Checks if all the invariants are satisfied.
Definition: Bit_Row.cc:331
unsigned long last() const
Returns the index of the last set bit or ULONG_MAX if no bit is set.
Definition: Bit_Row.cc:92
unsigned long prev(unsigned long position) const
Returns the index of the first set bit before position or ULONG_MAX if no bits before position is set...
Definition: Bit_Row.cc:107
A row in a matrix of bits.
mpz_t vec
Bit-vector representing the row.
unsigned int last_one(unsigned int u)
Assuming u is nonzero, returns the index of the last set bit in u.
unsigned long first() const
Returns the index of the first set bit or ULONG_MAX if no bit is set.
Definition: Bit_Row.cc:32
int compare(const Bit_Row &x, const Bit_Row &y)
bool operator[](unsigned long k) const
Returns the truth value corresponding to the bit in position k.
Definition: Bit_Row.cc:152
The entire library is confined to this namespace.
Definition: version.hh:61
#define PPL_BITS_PER_GMP_LIMB
bool operator==(const Bit_Row &x, const Bit_Row &y)
unsigned long next(unsigned long position) const
Returns the index of the first set bit after position or ULONG_MAX if no bit after position is set...
Definition: Bit_Row.cc:47
unsigned int first_one(unsigned int u)
Assuming u is nonzero, returns the index of the first set bit in u.
void union_helper(const Bit_Row &y, const Bit_Row &z)
Assigns to *this the union of y and z.
Definition: Bit_Row.cc:340