  6  static uint64 next_id = 1;
  8  static int cur_prime = 0;
 10  static job all_jobs_init[12289] = {0};
 11  static job *all_jobs = all_jobs_init;
 12  static size_t all_jobs_cap = 12289; /* == primes[0] */
 13  static size_t all_jobs_used = 0;
 15  static int hash_table_was_oom = 0;
 20  _get_job_hash_index(uint64 job_id)
 22      return job_id % all_jobs_cap;
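The lookup table is a flat array of bucket heads, sized to a prime (12289 to start) and indexed by taking the job id modulo the capacity; collisions are chained through each job's ht_next pointer. A minimal standalone sketch of that bucket-selection arithmetic, with hypothetical names (cap, bucket_for) standing in for the real table:

    #include <stdio.h>
    #include <stdint.h>

    static size_t cap = 12289;            /* matches primes[0] above */

    /* same arithmetic as _get_job_hash_index: id modulo a prime capacity */
    static size_t bucket_for(uint64_t id) {
        return id % cap;
    }

    int main(void) {
        for (uint64_t id = 12288; id <= 12290; id++)
            printf("job %llu -> bucket %zu\n", (unsigned long long)id, bucket_for(id));
        return 0;
    }

Because ids are handed out sequentially, consecutive ids land in consecutive buckets, so chains stay short until the load factor check below triggers a resize.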
 30      index = _get_job_hash_index(j->r.id);
 32      j->ht_next = all_jobs[index];
 36      /* accept a load factor of 4 */
 37      if (all_jobs_used > (all_jobs_cap << 2)) rehash(1);
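New jobs are pushed onto the front of their bucket's chain, and the table grows once it averages more than four jobs per bucket. To put numbers on this threshold and on the downscale check further down: at the initial capacity of 12289 buckets, rehash(1) fires once more than 12289 << 2 = 49156 jobs are stored, and rehash(0) fires once the count drops below 12289 >> 4 = 768.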
 41  rehash(int is_upscaling)
 44      size_t old_cap = all_jobs_cap, old_used = all_jobs_used, i;
 45      int old_prime = cur_prime;
 46      int d = is_upscaling ? 1 : -1;
 48      if (cur_prime + d >= NUM_PRIMES) return;
 49      if (cur_prime + d < 0) return;
 50      if (is_upscaling && hash_table_was_oom) return;
 54      all_jobs_cap = primes[cur_prime];
 55      all_jobs = calloc(all_jobs_cap, sizeof(job));
 57          twarnx("Failed to allocate %zu new hash buckets", all_jobs_cap);
 58          hash_table_was_oom = 1;
 59          cur_prime = old_prime;
 61          all_jobs_cap = old_cap;
 62          all_jobs_used = old_used;
 66      hash_table_was_oom = 0;
 68      for (i = 0; i < old_cap; i++) {
 76      if (old != all_jobs_init) {
 82  job_find(uint64 job_id)
 85      int index = _get_job_hash_index(job_id);
 87      for (jh = all_jobs[index]; jh && jh->r.id != job_id; jh = jh->ht_next);
 93  allocate_job(int body_size)
 97      j = malloc(sizeof(struct job) + body_size);
 98      if (!j) return twarnx("OOM"), (job) 0;
100      memset(j, 0, sizeof(struct job));
101      j->r.created_at = nanoseconds();
102      j->r.body_size = body_size;
103      j->next = j->prev = j; /* not in a linked list */
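allocate_job sizes a single block for both the job header and its body, so the payload lives immediately after the struct and travels with it (job_copy below relies on this when it memcpys header and body together). A hedged standalone sketch of that layout, using a hypothetical blob type with a C99 flexible array member:

    #include <stdlib.h>
    #include <string.h>

    struct blob {
        size_t body_size;
        char   body[];                 /* payload stored inline, right after the header */
    };

    static struct blob *blob_new(const char *data, size_t n) {
        struct blob *b = malloc(sizeof(struct blob) + n);   /* header + body, one allocation */
        if (!b) return NULL;
        b->body_size = n;
        memcpy(b->body, data, n);
        return b;
    }

    int main(void) {
        struct blob *b = blob_new("hello", 5);
        free(b);                       /* one malloc, one free per record */
        return 0;
    }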
108  make_job_with_id(uint pri, int64 delay, int64 ttr,
109                   int body_size, tube tube, uint64 id)
113      j = allocate_job(body_size);
114      if (!j) return twarnx("OOM"), (job) 0;
118      if (id >= next_id) next_id = id + 1;
128      TUBE_ASSIGN(j->tube, tube);
138      slot = &all_jobs[_get_job_hash_index(j->r.id)];
139      while (*slot && *slot != j) slot = &(*slot)->ht_next;
141          *slot = (*slot)->ht_next;
145      // Downscale when the hashmap is too sparse
146      if (all_jobs_used < (all_jobs_cap >> 4)) rehash(0);
153      TUBE_ASSIGN(j->tube, NULL);
154      if (j->r.state != Copy) job_hash_free(j);
161  job_setheappos(void *j, int pos)
163      ((job)j)->heap_index = pos;
167  job_pri_less(void *ax, void *bx)
170      if (a->r.pri < b->r.pri) return 1;
171      if (a->r.pri > b->r.pri) return 0;
172      return a->r.id < b->r.id;
176  job_delay_less(void *ax, void *bx)
179      if (a->r.deadline_at < b->r.deadline_at) return 1;
180      if (a->r.deadline_at > b->r.deadline_at) return 0;
181      return a->r.id < b->r.id;
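Both heap comparators break ties on the job id, so jobs with the same priority (or the same deadline) leave the heap in the order they were created. A standalone illustration of the rule, with a hypothetical rec struct standing in for struct job:

    #include <stdio.h>
    #include <stdint.h>

    struct rec { uint32_t pri; uint64_t id; };

    /* lower pri wins; on equal pri the lower (older) id wins */
    static int pri_less(const struct rec *a, const struct rec *b) {
        if (a->pri < b->pri) return 1;
        if (a->pri > b->pri) return 0;
        return a->id < b->id;
    }

    int main(void) {
        struct rec older = { 1024, 7 }, newer = { 1024, 9 };
        printf("%d\n", pri_less(&older, &newer));   /* 1: same pri, older id first */
        return 0;
    }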
191      n = malloc(sizeof(struct job) + j->r.body_size);
192      if (!n) return twarnx("OOM"), (job) 0;
194      memcpy(n, j, sizeof(struct job) + j->r.body_size);
195      n->next = n->prev = n; /* not in a linked list */
197      n->file = NULL; /* copies do not have refcnt on the wal */
199      n->tube = 0; /* Don't use memcpy for the tube, which we must refcount. */
200      TUBE_ASSIGN(n->tube, j->tube);
202      /* Mark this job as a copy so it can be appropriately freed later on */
211      if (j->r.state == Ready) return "ready";
212      if (j->r.state == Reserved) return "reserved";
213      if (j->r.state == Buried) return "buried";
214      if (j->r.state == Delayed) return "delayed";
219  job_list_any_p(job head)
221      return head->next != head || head->prev != head;
228      if (!job_list_any_p(j)) return NULL; /* not in a doubly-linked list */
230      j->next->prev = j->prev;
231      j->prev->next = j->next;
233      j->prev = j->next = j;
239  job_insert(job head, job j)
241      if (job_list_any_p(j)) return; /* already in a linked list */
243      j->prev = head->prev;
245      head->prev->next = j;
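The list helpers implement a circular doubly-linked list in which a detached job points to itself, so membership is just a pointer comparison and splicing needs no NULL checks. A minimal standalone sketch of the same convention, with hypothetical node, in_list, and insert_before names:

    #include <stdio.h>

    struct node { struct node *prev, *next; };

    /* a detached node points to itself, so membership is two pointer compares */
    static int in_list(struct node *n) { return n->next != n || n->prev != n; }

    static void insert_before(struct node *head, struct node *n) {
        if (in_list(n)) return;                /* already linked somewhere */
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    int main(void) {
        struct node head = { &head, &head };   /* empty list: head points to itself */
        struct node a    = { &a, &a };         /* detached node */
        printf("%d\n", in_list(&a));           /* 0 */
        insert_before(&head, &a);
        printf("%d\n", in_list(&a));           /* 1 */
        return 0;
    }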
259      return all_jobs_used;