18 #if !defined PLACEHOLDERS
19 #error "The contained type must be defined for a queue template"
27 #define ITEM_DELETE item_delete
29 #define ITEM_CLONE item_clone
32#if !defined ITEM_DELETE
33 #define ITEM_DELETE(value)
36#if !defined ITEM_CLONE
37 #define ITEM_CLONE(value) (*(value))
54#define INVARIANT_CHECK(self) \
56 ASSUME((self)->alloc); \
58 WHEN(!(self)->empty, (self)->head < (self)->capacity && (self)->tail < (self)->capacity)); \
59 ASSUME(WHEN((self)->empty, (self)->head == (self)->tail)); \
60 ASSUME(WHEN(!(self)->data, (self)->head == 0 && (self)->tail == 0));
75 if (capacity_for == 0) {
76 return NS(
SELF,
new)(alloc);
84 capacity *
sizeof(
ITEM));
101 ASSUME(self->head == self->tail);
109 ASSUME(self->tail == self->head);
113 if (self->head <= self->tail) {
114 return (self->tail - self->head) + 1;
116 return (self->capacity - self->head) + self->tail + 1;
123 self->capacity *
sizeof(
ITEM));
124 }
else if (self->head <= self->tail) {
126 (self->capacity - (self->tail + 1)) *
sizeof(
ITEM));
128 self->head *
sizeof(
ITEM));
131 (self->head - (self->tail + 1)) *
sizeof(
ITEM));
139 if (new_capacity_for > self->capacity) {
150 if (self->head > self->tail) {
155 size_t const old_capacity = self->capacity;
156 size_t const additional_capacity = new_capacity - old_capacity;
157 size_t const front_tail_items = self->tail + 1;
158 size_t const back_head_items = old_capacity - self->head;
160 if (front_tail_items > back_head_items) {
161 size_t const new_head = self->head + additional_capacity;
162 memmove(&new_data[new_head], &new_data[self->head], back_head_items *
sizeof(
ITEM));
163 self->head = new_head;
169 ASSUME(front_tail_items <= additional_capacity);
171 memcpy(&new_data[old_capacity], &new_data[0], front_tail_items *
sizeof(
ITEM));
172 self->tail = old_capacity + front_tail_items - 1;
175 self->capacity = new_capacity;
176 self->data = new_data;
193 &self->data[self->tail],
sizeof(
ITEM));
194 self->data[self->tail] =
item;
204 if (self->head == 0) {
205 self->head = self->capacity - 1;
211 &self->data[self->head],
sizeof(
ITEM));
212 self->data[self->head] =
item;
221 ITEM value = self->data[self->head];
223 &self->data[self->head],
sizeof(
ITEM));
225 &self->data[self->head],
sizeof(
ITEM));
227 if (self->head == self->tail) {
240 ITEM value = self->data[self->tail];
242 &self->data[self->tail],
sizeof(
ITEM));
243 if (self->head == self->tail) {
246 if (self->tail == 0) {
247 self->tail = self->capacity - 1;
259 return &self->data[real_index];
274 return &self->data[real_index];
290 if (index <= self->tail) {
291 return &self->data[self->tail - index];
293 size_t const from_end = index - self->tail;
294 return &self->data[self->capacity - from_end];
308 if (index <= self->tail) {
309 return &self->data[self->tail - index];
311 size_t const from_end = index - self->tail;
312 return &self->data[self->capacity - from_end];
322#define ITER NS(SELF, iter)
333static bool NS(ITER,
empty)(ITER
const* iter) {
370 self->capacity *
sizeof(
ITEM));
377#define ITER_CONST NS(SELF, iter_const)
383 SELF const* circular;
388static bool NS(ITER_CONST,
empty)(ITER_CONST
const* iter) {
416 ITEM* new_data = NULL;
418 size_t new_capacity = 0;
428 while ((
item =
NS(ITER_CONST,
next)(&iter))) {
436 .capacity = new_capacity,
439 .empty = self->empty,
440 .alloc = self->alloc,
454#undef INVARIANT_CHECK
static void free(SELF *self, void *ptr)
static void * realloc(SELF *self, void *ptr, size_t size)
static void * malloc(SELF *self, size_t size)
static bool empty_item(IV_PAIR const *const *item)
static ITER_CONST get_iter_const(SELF const *self)
static bool empty(ITER const *iter)
static ITER get_iter(SELF *self)
static SELF new_with_capacity_for(INDEX_TYPE items, ALLOC *alloc)
static INDEX_TYPE size(SELF const *self)
static IV_PAIR const * next(ITER *iter)
#define INVARIANT_CHECK(self)
static SELF clone(SELF const *self)
static ITEM const * read_from_front(SELF const *self, size_t index)
static ITEM pop_front(SELF *self)
static ITEM const * read_from_back(SELF const *self, size_t index)
static ITEM * try_write_from_back(SELF *self, size_t index)
static ITEM pop_back(SELF *self)
static ITEM * write_from_back(SELF *self, size_t index)
static void reserve(SELF *self, size_t new_capacity_for)
static ITEM * try_write_from_front(SELF *self, size_t index)
static ITEM * write_from_front(SELF *self, size_t index)
static void push_back(SELF *self, ITEM item)
static void PRIV set_inaccessible_memory_caps(SELF *self, memory_tracker_capability cap)
static void item_delete(item_t *self)
static ITEM const * try_read_from_back(SELF const *self, size_t index)
static ITEM const * try_read_from_front(SELF const *self, size_t index)
static item_t item_clone(item_t const *self)
static void push_front(SELF *self, ITEM item)
#define TRAIT_QUEUE(SELF)
static size_t position(ITER const *iter)
static ITEM * data(SELF *self)
static gdb_marker gdb_marker_new()
static INLINE CONST size_t next_power_of_2(size_t x)
static size_t INLINE CONST modulus_power_of_2_capacity(size_t index, size_t capacity)
static bool INLINE CONST is_power_of_2(size_t x)
static void memory_tracker_check(memory_tracker_level level, memory_tracker_capability cap, const void *addr, size_t size)
@ MEMORY_TRACKER_LVL_CONTAINER
static void memory_tracker_set(memory_tracker_level level, memory_tracker_capability cap, const volatile void *addr, size_t size)
memory_tracker_capability
A wrapper over ASan & MSan. Containers and allocators can use this for custom ASan & MSan poisoning,...
@ MEMORY_TRACKER_CAP_NONE
@ MEMORY_TRACKER_CAP_WRITE
static mutation_tracker mutation_tracker_new()
static void mutation_version_check(mutation_version const *self)
static mutation_version mutation_tracker_get(mutation_tracker const *self)
static void mutation_tracker_mutate(mutation_tracker *self)
mutation_tracker iterator_invalidation_tracker
gdb_marker derive_c_circular
A queue comprised of an extendable circular buffer.
Tracks a specific version of a value, so that it can be compared later to check for modification. For ex...