Direct-BT  2.3.1
Direct-BT - Direct Bluetooth Programming.
cow_vector.hpp
1 /*
2  * Author: Sven Gothel <sgothel@jausoft.com>
3  * Copyright (c) 2020 Gothel Software e.K.
4  * Copyright (c) 2020 ZAFENA AB
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sublicense, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be
15  * included in all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25 
26 #ifndef JAU_COW_VECTOR_HPP_
27 #define JAU_COW_VECTOR_HPP_
28 
29 #include <cstring>
30 #include <string>
31 #include <cstdint>
32 #include <limits>
33 #include <atomic>
34 #include <memory>
35 #include <mutex>
36 #include <condition_variable>
37 #include <vector>
38 #include <algorithm>
39 
40 #include <jau/cpp_lang_util.hpp>
41 #include <jau/debug.hpp>
42 #include <jau/basic_types.hpp>
43 #include <jau/ordered_atomic.hpp>
44 #include <jau/cow_iterator.hpp>
45 
46 namespace jau {
47 
48  /**
49  * Implementation of a Copy-On-Write (CoW) vector using std::vector as the underlying storage,
50  * exposing <i>lock-free</i> read operations using SC-DRF atomic synchronization.
51  * <p>
52  * This class shall be compliant with <i>C++ named requirements for Container</i>.
53  * </p>
54  * <p>
55  * The vector's store is owned using a shared reference to the data structure,
56  * allowing its replacement on Copy-On-Write (CoW).
57  * </p>
58  * <p>
59  * Writing to the store utilizes a mutex lock to avoid data races
60  * on this instance's write operations only, leaving read operations <i>lock-free</i>.<br>
61  * Write operations replace the store reference with a new instance using
62  * jau::sc_atomic_critical to synchronize with read operations.
63  * </p>
64  * <p>
65  * Reading from the store is <i>lock-free</i> and accesses the store reference using
66  * jau::sc_atomic_critical to synchronize with write operations.
67  * </p>
68  * <p>
69  * Immutable storage const_iterators are supported via jau::cow_ro_iterator,
70  * which are constructed <i>lock-free</i>.<br>
71  * jau::cow_ro_iterator holds a snapshot retrieved via jau::cow_vector::snapshot()
72  * until its destruction.
73  * </p>
74  * <p>
75  * Mutable storage iterators are supported via jau::cow_rw_iterator,
76  * which holds a copy of this CoW storage and locks its write mutex until
77  * jau::cow_rw_iterator::write_back() or its destruction.<br>
78  * After completing all mutable operations but before this iterator's destruction,
79  * the user might want to write back this iterator's storage to this CoW
80  * using jau::cow_rw_iterator::write_back().
81  * </p>
82  * <p>
83  * Index operations via ::operator[](size_type) or ::at(size_type) are not supported,
84  * since they would only be valid if value_type itself were a std::shared_ptr,
85  * which would prohibit the destruction of the referenced object while the storage is mutated,
86  * e.g. via jau::cow_vector::push_back().
87  * </p>
88  * <p>
89  * Custom mutable write operations are also supported via
90  * jau::cow_vector::get_write_mutex(), jau::cow_vector::copy_store() and jau::cow_vector::set_store().<br>
91  * See example in jau::cow_vector::set_store()
92  * </p>
93  * See also:
94  * <pre>
95  * - Sequentially Consistent (SC) ordering or SC-DRF (data race free) <https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering>
96  * - std::memory_order <https://en.cppreference.com/w/cpp/atomic/memory_order>
97  * </pre>
98  * \deprecated jau::cow_vector will be retired, use jau::cow_darray and potentially jau::darray.
99  * @see jau::cow_darray
100  * @see jau::cow_ro_iterator
101  * @see jau::for_each_fidelity
102  * @see jau::cow_rw_iterator
103  * @see jau::cow_rw_iterator::write_back()
104  */
105  template <typename Value_type, typename Alloc_type = std::allocator<Value_type>>
106  class cow_vector
107  {
108  public:
109  // typedefs for C++ named requirements: Container
110 
111  typedef Value_type value_type;
112  typedef value_type* pointer;
113  typedef const value_type* const_pointer;
114  typedef value_type& reference;
115  typedef const value_type& const_reference;
116  typedef std::size_t size_type;
117  typedef typename std::make_signed<size_type>::type difference_type;
118  typedef Alloc_type allocator_type;
119 
120  typedef std::vector<value_type, allocator_type> storage_t;
121  typedef std::shared_ptr<storage_t> storage_ref_t;
122 
123  typedef cow_vector<value_type, allocator_type> cow_container_t;
124 
125  /**
126  * @see jau::cow_darray::const_iterator
127  * @see jau::cow_ro_iterator
128  */
129  typedef cow_ro_iterator<storage_t, storage_ref_t, cow_container_t> const_iterator;
130 
131  /**
132  * @see jau::cow_darray::iterator
133  * @see jau::cow_rw_iterator
134  */
135  typedef cow_rw_iterator<storage_t, storage_ref_t, cow_container_t> iterator;
136 
137  private:
138  static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max();
139 
140  storage_ref_t store_ref;
141  mutable sc_atomic_bool sync_atomic;
142  mutable std::recursive_mutex mtx_write;
143 
144  public:
145  // ctor
146 
147  constexpr cow_vector() noexcept
148  : store_ref( std::make_shared<storage_t>() ), sync_atomic(false) {}
149 
150  constexpr explicit cow_vector(const allocator_type & a) noexcept
151  : store_ref( std::make_shared<storage_t>(a) ), sync_atomic(false) { }
152 
153  constexpr explicit cow_vector(size_type n, const allocator_type& a = allocator_type())
154  : store_ref( std::make_shared<storage_t>(n, a) ), sync_atomic(false) { }
155 
156  constexpr cow_vector(size_type n, const value_type& value, const allocator_type& a = allocator_type())
157  : store_ref( std::make_shared<storage_t>(n, value, a) ), sync_atomic(false) { }
158 
159  constexpr explicit cow_vector(const storage_t& x)
160  : store_ref( std::make_shared<storage_t>(x, x->get_allocator()) ), sync_atomic(false) { }
161 
162  constexpr_atomic
163  cow_vector(const cow_vector& x)
164  : sync_atomic(false) {
165  storage_ref_t x_store_ref;
166  {
167  sc_atomic_critical sync_x( x.sync_atomic );
168  x_store_ref = x.store_ref;
169  }
170  store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
171  }
172 
173  /**
174  * Like std::vector::operator=(&), assignment.
175  * <p>
176  * This write operation uses a mutex lock and blocks this instance's write operations only.
177  * </p>
178  */
179  cow_vector& operator=(const cow_vector& x) {
180  std::lock_guard<std::recursive_mutex> lock(mtx_write);
181  storage_ref_t x_store_ref;
182  {
183  sc_atomic_critical sync_x( x.sync_atomic );
184  x_store_ref = x.store_ref;
185  }
186  storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
187  {
188  sc_atomic_critical sync(sync_atomic);
189  store_ref = std::move(new_store_ref);
190  }
191  return *this;
192  }
193 
194  constexpr_atomic
195  cow_vector(cow_vector && x) noexcept {
196  // Strategy-1: Acquire lock, blocking
197  // - If somebody else holds the lock, we wait.
198  // - Then we own the lock
199  // - Post move-op, the source object does not exist anymore
200  std::unique_lock<std::recursive_mutex> lock(x.mtx_write); // *this doesn't exist yet, not locking ourselves
201  {
202  store_ref = std::move(x.store_ref);
203  // sync_atomic = std::move(x.sync_atomic);
204  // mtx_write will be a fresh one, but we hold the source's lock
205 
206  // Moved source array has been taken over, null its store_ref
207  x.store_ref = nullptr;
208  }
209  }
210 
211  /**
212  * Like std::vector::operator=(&&), move.
213  * <p>
214  * This write operation uses a mutex lock and blocks both cow_vector instances' write operations.
215  * </p>
216  */
217  constexpr_atomic
218  cow_vector& operator=(cow_vector&& x) {
219  // Strategy-2: Acquire locks of both, blocking
220  // - If somebody else holds the lock, we wait.
221  // - Then we own the lock for both instances
222  // - Post move-op, the source object does not exist anymore
223  std::unique_lock<std::recursive_mutex> lock1(x.mtx_write, std::defer_lock); // utilize std::lock(r, w), allowing mixed order waiting on read/write ops
224  std::unique_lock<std::recursive_mutex> lock2( mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
225  std::lock(lock1, lock2);
226  {
227  sc_atomic_critical sync_x( x.sync_atomic );
228  sc_atomic_critical sync ( sync_atomic );
229  store_ref = std::move(x.store_ref);
230  // mtx_write and the atomic will be kept as is, but we hold the source's lock
231 
232  // Moved source array has been taken over, null its store_ref
233  x.store_ref = nullptr;
234  }
235  return *this;
236  }
237 
238  /**
239  * Creates a new instance,
240  * copying all elements from the given template input-iterator value_type range [first, last).<br>
241  * Size will equal the range [first, last), i.e. <code>size_type(last-first)</code>.
242  * @tparam InputIt template input-iterator custom type
243  * @param first template input-iterator to first element of value_type range [first, last)
244  * @param last template input-iterator to last element of value_type range [first, last)
245  * @param alloc custom allocator_type instance
246  */
247  template< class InputIt >
248  constexpr cow_vector(InputIt first, InputIt last, const allocator_type& alloc = allocator_type())
249  : store_ref(std::make_shared<storage_t>(first, last, alloc)), sync_atomic(false)
250  { }
251 
252  /**
253  * Create a new instance from an initializer list.
254  *
255  * @param initlist initializer_list.
256  * @param alloc allocator
257  */
258  constexpr cow_vector(std::initializer_list<value_type> initlist, const allocator_type& alloc = allocator_type())
259  : store_ref(std::make_shared<storage_t>(initlist, alloc)), sync_atomic(false)
260  { }
261 
262  ~cow_vector() noexcept { }
263 
264  /**
265  * Returns <code>std::numeric_limits<difference_type>::max()</code> as the maximum array size.
266  * <p>
267  * We rely on the signed <code>difference_type</code> for pointer arithmetic,
268  * deducing ranges from iterators.
269  * </p>
270  */
271  constexpr size_type max_size() const noexcept { return DIFF_MAX; }
272 
273  // cow_vector features
274 
275  /**
276  * Returns this instance's recursive write mutex, allowing the user to
277  * implement more complex mutable write operations.
278  * <p>
279  * See example in jau::cow_vector::set_store()
280  * </p>
281  *
282  * @see jau::cow_vector::get_write_mutex()
283  * @see jau::cow_vector::copy_store()
284  * @see jau::cow_vector::set_store()
285  */
286  constexpr std::recursive_mutex & get_write_mutex() noexcept { return mtx_write; }
287 
288  /**
289  * Returns a new shared_ptr copy of the underlying store,
290  * i.e. using a new copy-constructed vector.
291  * <p>
292  * See example in jau::cow_vector::set_store()
293  * </p>
294  * <p>
295  * This special operation uses a mutex lock and blocks this instance's write operations only.
296  * </p>
297  * @see jau::cow_vector::get_write_mutex()
298  * @see jau::cow_vector::copy_store()
299  * @see jau::cow_vector::set_store()
300  */
301  constexpr_atomic
302  storage_ref_t copy_store() {
303  std::lock_guard<std::recursive_mutex> lock(mtx_write);
304  return std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
305  }
306 
307  /**
308  * Special case facility allowing the user to replace the current store
309  * with the given value, potentially acquired via jau::cow_vector::copy_store()
310  * and mutated while holding the jau::cow_vector::get_write_mutex() lock.
311  * <p>
312  * This is a move operation, i.e. the given new_store_ref is invalid on the caller side
313  * after this operation. <br>
314  * The user shall pass the store via std::move()
315  * <pre>
316  * cow_vector<std::shared_ptr<Thing>> list;
317  * ...
318  * {
319  * std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
320  * std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store();
321  * ...
322  * some fancy mutation
323  * ...
324  * list.set_store(std::move(snapshot));
325  * }
326  * </pre>
327  * </p>
328  * @param new_store_ref the user store to be moved here, replacing the current store.
329  *
330  * @see jau::cow_vector::get_write_mutex()
331  * @see jau::cow_vector::copy_store()
332  * @see jau::cow_vector::set_store()
333  */
334  constexpr_atomic
335  void set_store(storage_ref_t && new_store_ref) noexcept {
336  std::lock_guard<std::recursive_mutex> lock(mtx_write);
337  sc_atomic_critical sync(sync_atomic);
338  store_ref = std::move( new_store_ref );
339  }
340 
341  /**
342  * Returns the current snapshot of the underlying shared std::vector<T> reference.
343  * <p>
344  * Note that this snapshot will be outdated by the next (concurrent) write operation.<br>
345  * The returned referenced vector is still valid and not mutated,
346  * but does not represent the current content of this cow_vector instance.
347  * </p>
348  * <p>
349  * This read operation is <i>lock-free</i>.
350  * </p>
351  * @see jau::for_each_cow
352  */
353  constexpr_atomic
354  storage_ref_t snapshot() const noexcept {
355  sc_atomic_critical sync( sync_atomic );
356  return store_ref;
357  }
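 // Usage sketch, assuming a hypothetical jau::cow_vector<int> named 'list': a lock-free read pass
 // over a snapshot. The returned shared_ptr keeps the referenced std::vector alive even if a
 // concurrent writer replaces the store in the meantime.
 //
 //   std::shared_ptr<std::vector<int>> snap = list.snapshot(); // lock-free
 //   int sum = 0;
 //   for(const int v : *snap) { sum += v; }                    // reads the immutable snapshot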
358 
359  // const_iterator, non mutable, read-only
360 
361  // Removed for clarity: "constexpr const_iterator begin() const noexcept"
362 
363  /**
364  * See description in jau::cow_darray::cbegin()
365  */
366  constexpr const_iterator cbegin() const noexcept {
367  return const_iterator(snapshot(), store_ref->cbegin());
368  }
369 
370  // iterator, mutable, read-write
371 
372  /**
373  * See description in jau::cow_darray::begin()
374  */
375  constexpr iterator begin() {
376  return iterator(*this);
377  }
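 // Usage sketch, assuming a hypothetical jau::cow_vector<int> named 'list': a mutable pass via the
 // cow_rw_iterator returned by begin(). The iterator works on a private copy of the store and holds
 // the write mutex until jau::cow_rw_iterator::write_back() or its destruction.
 //
 //   {
 //       auto it = list.begin();   // copies the store, locks mtx_write
 //       // ... mutate the copy through the iterator ...
 //       it.write_back();          // publish the copy as the new store
 //   }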
378 
379  // read access
380 
381  allocator_type get_allocator() const noexcept {
382  sc_atomic_critical sync( sync_atomic );
383  return store_ref->get_allocator();
384  }
385 
386  constexpr_atomic
387  size_type capacity() const noexcept {
388  sc_atomic_critical sync( sync_atomic );
389  return store_ref->capacity();
390  }
391 
392  /**
393  * Like std::vector::empty().
394  * <p>
395  * This read operation is <i>lock-free</i>.
396  * </p>
397  */
398  constexpr_atomic
399  bool empty() const noexcept {
400  sc_atomic_critical sync( sync_atomic );
401  return store_ref->empty();
402  }
403 
404  /**
405  * Like std::vector::size().
406  * <p>
407  * This read operation is <i>lock-free</i>.
408  * </p>
409  */
410  constexpr_atomic
411  size_type size() const noexcept {
412  sc_atomic_critical sync( sync_atomic );
413  return store_ref->size();
414  }
415 
416  // write access
417 
418  void reserve(size_type new_capacity) {
419  std::lock_guard<std::recursive_mutex> lock(mtx_write);
420  storage_ref_t old_store_ref = store_ref;
421  if( new_capacity > old_store_ref->capacity() ) {
422  storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
423  new_store_ref->reserve(new_capacity);
424  sc_atomic_critical sync( sync_atomic );
425  store_ref = std::move(new_store_ref);
426  }
427  }
428 
429  /**
430  * Like std::vector::clear(), but ending with zero capacity.
431  * <p>
432  * This write operation uses a mutex lock and blocks this instance's write operations.
433  * </p>
434  */
435  constexpr_atomic
436  void clear() noexcept {
437  std::lock_guard<std::recursive_mutex> lock(mtx_write);
438  storage_ref_t new_store_ref = std::make_shared<storage_t>();
439  {
440  sc_atomic_critical sync(sync_atomic);
441  store_ref = std::move(new_store_ref);
442  }
443  }
444 
445  /**
446  * Like std::vector::swap().
447  * <p>
448  * This write operation uses a mutex lock and blocks both cow_vector instances' write operations.
449  * </p>
450  */
451  constexpr_atomic
452  void swap(cow_vector& x) noexcept {
453  std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object
454  std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
455  std::lock(lock, lock_x);
456  {
457  sc_atomic_critical sync_x( x.sync_atomic );
458  sc_atomic_critical sync(sync_atomic);
459  storage_ref_t x_store_ref = x.store_ref;
460  x.store_ref = store_ref;
461  store_ref = x_store_ref;
462  }
463  }
464 
465  /**
466  * Like std::vector::pop_back().
467  * <p>
468  * This write operation uses a mutex lock and blocks this instance's write operations only.
469  * </p>
470  */
471  constexpr_atomic
472  void pop_back() noexcept {
473  std::lock_guard<std::recursive_mutex> lock(mtx_write);
474  storage_ref_t old_store_ref = store_ref;
475  if( 0 < old_store_ref->size() ) {
476  storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
477  new_store_ref->pop_back();
478  {
479  sc_atomic_critical sync(sync_atomic);
480  store_ref = std::move(new_store_ref);
481  }
482  }
483  }
484 
485  /**
486  * Like std::vector::push_back(), copy
487  * <p>
488  * This write operation uses a mutex lock and blocks this instance's write operations only.
489  * </p>
490  * @param x the value to be added at the tail.
491  * @param x the value to be added at the tail.
492  constexpr_atomic
493  void push_back(const value_type& x) {
494  std::lock_guard<std::recursive_mutex> lock(mtx_write);
495  storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
496  new_store_ref->push_back(x);
497  {
498  sc_atomic_critical sync(sync_atomic);
499  store_ref = std::move(new_store_ref);
500  }
501  }
502 
503  /**
504  * Like std::vector::push_back(), move
505  * <p>
506  * This write operation uses a mutex lock and blocks this instance's write operations only.
507  * </p>
508  */
509  constexpr_atomic
510  void push_back(value_type&& x) {
511  std::lock_guard<std::recursive_mutex> lock(mtx_write);
512  storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
513  new_store_ref->push_back( std::move(x) );
514  {
515  sc_atomic_critical sync(sync_atomic);
516  store_ref = std::move(new_store_ref);
517  }
518  }
519 
520  /**
521  * Like std::vector::emplace_back(), construct a new element in place at the end().
522  * <p>
523  * Constructs the element at the end() using placement new.
524  * </p>
525  * <p>
526  * size will be increased by one.
527  * </p>
528  * @param args arguments to forward to the constructor of the element
529  */
530  template<typename... Args>
531  constexpr_atomic
532  reference emplace_back(Args&&... args) {
533  std::lock_guard<std::recursive_mutex> lock(mtx_write);
534  storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
535  reference res = new_store_ref->emplace_back( std::forward<Args>(args)... );
536  {
537  sc_atomic_critical sync(sync_atomic);
538  store_ref = std::move(new_store_ref);
539  }
540  return res;
541  }
542 
543  /**
544  * Generic value_type equal comparator to be user defined for e.g. jau::cow_vector::push_back_unique().
545  * @param a one element of the equality test.
546  * @param b the other element of the equality test.
547  * @return true if both are equal
548  */
549  typedef bool(*equal_comparator)(const value_type& a, const value_type& b);
550 
551  /**
552  * Like std::vector::push_back(), but only if the newly added element does not yet exist.
553  * <p>
554  * This write operation uses a mutex lock and blocks this instance's write operations only.
555  * </p>
556  * <p>
557  * Examples
558  * <pre>
559  * static jau::cow_vector<Thing>::equal_comparator thingEqComparator =
560  * [](const Thing &a, const Thing &b) -> bool { return a == b; };
561  * ...
562  * jau::cow_vector<Thing> list;
563  *
564  * bool added = list.push_back_unique(new_element, thingEqComparator);
565  * ...
566  * cow_vector<std::shared_ptr<Thing>> listOfRefs;
567  * bool added = listOfRefs.push_back_unique(new_element,
568  * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; });
569  * </pre>
570  * </p>
571  * @param x the value to be added at the tail, if not existing yet.
572  * @param comparator the equal comparator to return true if both given elements are equal
573  * @return true if the element has been uniquely added, otherwise false
574  */
575  constexpr_atomic
576  bool push_back_unique(const value_type& x, equal_comparator comparator) {
577  std::lock_guard<std::recursive_mutex> lock(mtx_write);
578  for(auto it = store_ref->begin(); it != store_ref->end(); ) {
579  if( comparator( *it, x ) ) {
580  return false; // already included
581  } else {
582  ++it;
583  }
584  }
585  push_back(x);
586  return true;
587  }
588 
589  /**
590  * Erase either the first matching element or all matching elements.
591  * <p>
592  * This write operation uses a mutex lock and blocks this instance's write operations only.
593  * </p>
594  * <p>
595  * Examples
596  * <pre>
597  * cow_vector<Thing> list;
598  * int count = list.erase_matching(element, true,
599  * [](const Thing &a, const Thing &b) -> bool { return a == b; });
600  * ...
601  * static jau::cow_vector<Thing>::equal_comparator thingRefEqComparator =
602  * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; };
603  * ...
604  * cow_vector<std::shared_ptr<Thing>> listOfRefs;
605  * int count = listOfRefs.erase_matching(element, false, thingRefEqComparator);
606  * </pre>
607  * </p>
608  * @param x the value to be compared against for equality; matching elements are erased.
609  * @param all_matching if true, erase all matching elements, otherwise only the first matching element.
610  * @param comparator the equal comparator to return true if both given elements are equal
611  * @return number of erased elements
612  */
613  constexpr_atomic
614  int erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
615  int count = 0;
616  std::lock_guard<std::recursive_mutex> lock(mtx_write);
617  storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
618  for(auto it = new_store_ref->begin(); it != new_store_ref->end(); ) {
619  if( comparator( *it, x ) ) {
620  it = new_store_ref->erase(it);
621  ++count;
622  if( !all_matching ) {
623  break;
624  }
625  } else {
626  ++it;
627  }
628  }
629  if( 0 < count ) { // mutated new_store_ref?
630  sc_atomic_critical sync(sync_atomic);
631  store_ref = std::move(new_store_ref);
632  } // else throw away new_store_ref
633  return count;
634  }
635 
636  constexpr_cxx20 std::string toString() const noexcept {
637  std::string res("{ " + std::to_string( size() ) + ": ");
638  int i=0;
639  jau::for_each_const(*this, [&res, &i](const value_type & e) {
640  if( 1 < ++i ) { res.append(", "); }
641  res.append( jau::to_string(e) );
642  } );
643  res.append(" }");
644  return res;
645  }
646  };
647 
648  /****************************************************************************************
649  ****************************************************************************************/
650 
651  template<typename Value_type, typename Alloc_type>
652  std::ostream & operator << (std::ostream &out, const cow_vector<Value_type, Alloc_type> &c) {
653  out << c.toString();
654  return out;
655  }
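 // Usage sketch (hypothetical values): streaming a cow_vector uses toString(), assuming
 // Value_type is supported by jau::to_string().
 //
 //   jau::cow_vector<int> list;
 //   list.push_back(1);
 //   list.push_back(2);
 //   std::cout << list << std::endl;   // prints something like "{ 2: 1, 2 }"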
656 
657  /****************************************************************************************
658  ****************************************************************************************/
659 
660  template<typename Value_type, typename Alloc_type>
661  bool operator==(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
662  if( &rhs == &lhs ) {
663  return true;
664  }
665  typename cow_vector<Value_type, Alloc_type>::const_iterator rhs_cend = rhs.cbegin();
666  rhs_cend += rhs.size();
667  return (rhs.size() == lhs.size() && std::equal(rhs.cbegin(), rhs_cend, lhs.cbegin()));
668  }
669  template<typename Value_type, typename Alloc_type>
670  bool operator!=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
671  return !(rhs==lhs);
672  }
673 
674  template<typename Value_type, typename Alloc_type>
675  bool operator<(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
676  typename cow_vector<Value_type, Alloc_type>::const_iterator rhs_cend = rhs.cbegin();
677  rhs_cend += rhs.size();
678  typename cow_vector<Value_type, Alloc_type>::const_iterator lhs_cend = lhs.cbegin();
679  lhs_cend += lhs.size();
680  return std::lexicographical_compare(rhs.cbegin(), rhs_cend, lhs.cbegin(), lhs_cend);
681  }
682 
683  template<typename Value_type, typename Alloc_type>
684  bool operator>(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
685  { return lhs < rhs; }
686 
687  template<typename Value_type, typename Alloc_type>
688  bool operator>=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
689  { return !(lhs < rhs); }
690 
691  template<typename Value_type, typename Alloc_type>
692  bool operator<=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
693  { return !(rhs < lhs); }
694 
695  template<typename Value_type, typename Alloc_type>
696  void swap(cow_vector<Value_type, Alloc_type>& rhs, cow_vector<Value_type, Alloc_type>& lhs) noexcept
697  { rhs.swap(lhs); }
698 } /* namespace jau */
699 
700 #endif /* JAU_COW_VECTOR_HPP_ */
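A minimal end-to-end sketch of the usage pattern documented above, under the assumption of a single shared jau::cow_vector<int> named data: the writer mutates through the mutex-protected write operations, while the reader works lock-free on a snapshot.

#include <jau/cow_vector.hpp>

#include <cstdio>
#include <memory>
#include <thread>
#include <vector>

jau::cow_vector<int> data; // shared between writer and reader

static void writer() {
    for(int i = 0; i < 100; ++i) {
        data.push_back(i); // copies the store, then publishes it under the write mutex
    }
}

static void reader() {
    // Lock-free read: the snapshot stays valid even while writer() keeps replacing the store.
    std::shared_ptr<std::vector<int>> snap = data.snapshot();
    long sum = 0;
    for(const int v : *snap) { sum += v; }
    std::printf("read %zu elements, sum %ld\n", snap->size(), sum);
}

int main() {
    std::thread w(writer);
    std::thread r(reader);
    w.join();
    r.join();
    return 0;
}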