libs/corosio/src/corosio/src/detail/timer_service.cpp

88.4% Lines (237/268) 91.7% Functions (33/36) 69.0% Branches (89/129)
Line Branch Hits Source Code
1 //
2 // Copyright (c) 2026 Steve Gerbino
3 //
4 // Distributed under the Boost Software License, Version 1.0. (See accompanying
5 // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // Official repository: https://github.com/cppalliance/corosio
8 //
9
10 #include "src/detail/timer_service.hpp"
11
12 #include <boost/corosio/detail/scheduler.hpp>
13 #include "src/detail/scheduler_op.hpp"
14 #include <boost/capy/error.hpp>
15 #include <boost/capy/ex/executor_ref.hpp>
16 #include <system_error>
17
18 #include <coroutine>
19 #include <limits>
20 #include <mutex>
21 #include <stdexcept>
22 #include <stop_token>
23 #include <vector>
24
25 /*
26 Timer Service
27 =============
28
29 The public timer class holds an opaque timer_impl* and forwards
30 all operations through extern free functions defined at the bottom
31 of this file.
32
33 Data Structures
34 ---------------
35 timer_impl holds per-timer state: expiry, coroutine handle,
36 executor, embedded completion_op, heap index, and free-list link.
37
38 timer_service_impl owns a min-heap of active timers and a free
39 list of recycled impls. The heap is ordered by expiry time; the
40 scheduler queries nearest_expiry() to set the epoll/timerfd
41 timeout.
42
43 Optimization Strategy
44 ---------------------
45 The common timer lifecycle is: construct, set expiry, cancel or
46 wait, destroy. Several optimizations target this path:
47
48 1. Deferred heap insertion — expires_after() stores the expiry
49 but does not insert into the heap. Insertion happens in
50 wait(). If the timer is cancelled or destroyed before wait(),
51 the heap is never touched and no mutex is taken. This also
52 enables the already-expired fast path: when wait() sees
53 expiry <= now before inserting, it posts the coroutine
54 handle to the executor and returns noop_coroutine — no
55 heap, no mutex, no epoll. This is only possible because
56 the coroutine API guarantees wait() always follows
57 expires_after(); callback APIs cannot assume this call
58 order.
59
60 2. Thread-local impl cache — A single-slot per-thread cache of
61 timer_impl avoids the mutex on create/destroy for the common
62 create-then-destroy-on-same-thread pattern. The RAII wrapper
63 tl_impl_cache deletes the cached impl when the thread exits.
64
65 3. Thread-local service cache — Caches the {context, service}
66 pair per-thread to skip find_service() on every timer
67 construction.
68
69 4. Embedded completion_op — timer_impl embeds a scheduler_op
70 subclass, eliminating heap allocation per fire/cancel. Its
71 destroy() is a no-op since the timer_impl owns the lifetime.
72
73 5. Cached nearest expiry — An atomic<int64_t> mirrors the heap
74 root's time, updated under the lock. nearest_expiry() and
75 empty() read the atomic without locking.
76
77 6. might_have_pending_waits_ flag — Set on wait(), cleared on
78 cancel. Lets cancel_timer() return without locking when no
79 wait was ever issued.
80
81 With all fast paths hit (idle timer, same thread), the
82 schedule/cancel cycle acquires no mutex; a caller-side sketch follows this comment.
83 */
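
To make the lifecycle above concrete, here is a minimal caller-side sketch of the hot path. It assumes the public corosio timer exposes an execution-context constructor, expires_after(), and an awaitable wait(), as implied by the free functions at the bottom of this file; the exact public spellings and the task/co_await wiring are assumptions, not taken from this file.

    // Hypothetical usage; only the timer_service.cpp internals below are authoritative.
    capy::task<void> sleep_one_second(boost::corosio::io_context& ioc)
    {
        boost::corosio::timer t(ioc);              // create_impl(): thread-local impl cache, no mutex
        t.expires_after(std::chrono::seconds(1));  // stores the expiry only; heap untouched
        std::error_code ec;
        co_await t.wait(ec);                       // heap insertion happens here, unless the
                                                   // already-expired fast path posts immediately
        // t destroyed: destroy_impl() pushes the impl back into the thread-local cache
    }

If the timer is cancelled or destroyed before wait(), none of the steps above ever touch the heap or the service mutex, which is exactly the path the optimizations target.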
84
85 namespace boost::corosio::detail {
86
87 class timer_service_impl;
88
89 void timer_service_invalidate_cache() noexcept;
90
91 struct timer_impl
92 : timer::timer_impl
93 {
94 using clock_type = std::chrono::steady_clock;
95 using time_point = clock_type::time_point;
96 using duration = clock_type::duration;
97
98 // Embedded completion op — avoids heap allocation per fire/cancel
99 struct completion_op final : scheduler_op
100 {
101 timer_impl* impl_ = nullptr;
102
103 static void do_complete(
104 void* owner,
105 scheduler_op* base,
106 std::uint32_t,
107 std::uint32_t);
108
109 129 completion_op() noexcept
110 129 : scheduler_op(&do_complete)
111 {
112 129 }
113
114 void operator()() override;
115 // No-op — lifetime owned by timer_impl, not the scheduler queue
116 void destroy() override {}
117 };
118
119 timer_service_impl* svc_ = nullptr;
120 time_point expiry_;
121 std::size_t heap_index_ = (std::numeric_limits<std::size_t>::max)();
122 // Lets cancel_timer() skip the lock when no wait() was ever issued
123 bool might_have_pending_waits_ = false;
124
125 // Wait operation state
126 std::coroutine_handle<> h_;
127 capy::executor_ref d_;
128 std::error_code* ec_out_ = nullptr;
129 std::stop_token token_;
130 bool waiting_ = false;
131
132 completion_op op_;
133 std::error_code ec_value_;
134
135 // Free list linkage (reused when impl is on free_list)
136 timer_impl* next_free_ = nullptr;
137
138 129 explicit timer_impl(timer_service_impl& svc) noexcept
139 129 : svc_(&svc)
140 {
141 129 op_.impl_ = this;
142 129 }
143
144 void release() override;
145
146 std::coroutine_handle<> wait(
147 std::coroutine_handle<>,
148 capy::executor_ref,
149 std::stop_token,
150 std::error_code*) override;
151 };
152
153 timer_impl* try_pop_tl_cache(timer_service_impl*) noexcept;
154 bool try_push_tl_cache(timer_impl*) noexcept;
155
156 class timer_service_impl : public timer_service
157 {
158 public:
159 using clock_type = std::chrono::steady_clock;
160 using time_point = clock_type::time_point;
161 using key_type = timer_service;
162
163 private:
164 struct heap_entry
165 {
166 time_point time_;
167 timer_impl* timer_;
168 };
169
170 scheduler* sched_ = nullptr;
171 mutable std::mutex mutex_;
172 std::vector<heap_entry> heap_;
173 timer_impl* free_list_ = nullptr;
174 // Tracks impls not on free-list, for shutdown correctness
175 std::size_t live_count_ = 0;
176 callback on_earliest_changed_;
177 // Avoids mutex in nearest_expiry() and empty()
178 mutable std::atomic<std::int64_t> cached_nearest_ns_{
179 (std::numeric_limits<std::int64_t>::max)()};
180
181 public:
182 309 timer_service_impl(capy::execution_context&, scheduler& sched)
183 309 : timer_service()
184 309 , sched_(&sched)
185 {
186 309 }
187
188 15378 scheduler& get_scheduler() noexcept { return *sched_; }
189
190 618 ~timer_service_impl() = default;
191
192 timer_service_impl(timer_service_impl const&) = delete;
193 timer_service_impl& operator=(timer_service_impl const&) = delete;
194
195 309 void set_on_earliest_changed(callback cb) override
196 {
197 309 on_earliest_changed_ = cb;
198 309 }
199
200 309 void shutdown() override
201 {
202 309 timer_service_invalidate_cache();
203
204 // Cancel waiting timers still in the heap
205
1/2
✗ Branch 4 not taken.
✓ Branch 5 taken 309 times.
309 for (auto& entry : heap_)
206 {
207 auto* impl = entry.timer_;
208 if (impl->waiting_)
209 {
210 impl->waiting_ = false;
211 impl->h_.destroy();
212 sched_->on_work_finished();
213 }
214 impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
215 delete impl;
216 --live_count_;
217 }
218 309 heap_.clear();
219 309 cached_nearest_ns_.store(
220 (std::numeric_limits<std::int64_t>::max)(),
221 std::memory_order_release);
222
223 // Delete free-listed impls
224
2/2
✓ Branch 0 taken 30 times.
✓ Branch 1 taken 309 times.
339 while (free_list_)
225 {
226 30 auto* next = free_list_->next_free_;
227
1/2
✓ Branch 0 taken 30 times.
✗ Branch 1 not taken.
30 delete free_list_;
228 30 free_list_ = next;
229 }
230
231 // Any live timers not in heap and not on free list are still
232 // referenced by timer objects — they'll call destroy_impl()
233 // which will delete them (live_count_ tracks this).
234 309 }
235
236 7792 timer::timer_impl* create_impl() override
237 {
238 7792 timer_impl* impl = try_pop_tl_cache(this);
239
2/2
✓ Branch 0 taken 7663 times.
✓ Branch 1 taken 129 times.
7792 if (impl)
240 {
241 7663 impl->svc_ = this;
242 7663 impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
243 7663 impl->might_have_pending_waits_ = false;
244 7663 return impl;
245 }
246
247
1/1
✓ Branch 1 taken 129 times.
129 std::lock_guard lock(mutex_);
248
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 129 times.
129 if (free_list_)
249 {
250 impl = free_list_;
251 free_list_ = impl->next_free_;
252 impl->next_free_ = nullptr;
253 impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
254 impl->might_have_pending_waits_ = false;
255 }
256 else
257 {
258
1/1
✓ Branch 1 taken 129 times.
129 impl = new timer_impl(*this);
259 }
260 129 ++live_count_;
261 129 return impl;
262 129 }
263
264 7792 void destroy_impl(timer_impl& impl)
265 {
266
2/2
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 7790 times.
7792 if (impl.heap_index_ != (std::numeric_limits<std::size_t>::max)())
267 {
268
1/1
✓ Branch 1 taken 2 times.
2 std::lock_guard lock(mutex_);
269
1/1
✓ Branch 1 taken 2 times.
2 remove_timer_impl(impl);
270 2 refresh_cached_nearest();
271 2 }
272
273
2/2
✓ Branch 1 taken 7762 times.
✓ Branch 2 taken 30 times.
7792 if (try_push_tl_cache(&impl))
274 7762 return;
275
276
1/1
✓ Branch 1 taken 30 times.
30 std::lock_guard lock(mutex_);
277 30 impl.next_free_ = free_list_;
278 30 free_list_ = &impl;
279 30 --live_count_;
280 30 }
281
282 // Heap insertion deferred to wait() — avoids lock when timer is idle
283 7795 void update_timer(timer_impl& impl, time_point new_time)
284 {
285 bool in_heap =
286 7795 (impl.heap_index_ != (std::numeric_limits<std::size_t>::max)());
287
3/4
✓ Branch 0 taken 7793 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 7793 times.
✗ Branch 3 not taken.
7795 if (!in_heap && !impl.waiting_)
288 7793 return;
289
290 2 bool notify = false;
291 2 bool was_waiting = false;
292
293 {
294
1/1
✓ Branch 1 taken 2 times.
2 std::lock_guard lock(mutex_);
295
296
1/2
✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
2 if (impl.waiting_)
297 {
298 2 was_waiting = true;
299 2 impl.waiting_ = false;
300 }
301
302
1/2
✓ Branch 1 taken 2 times.
✗ Branch 2 not taken.
2 if (impl.heap_index_ < heap_.size())
303 {
304 2 time_point old_time = heap_[impl.heap_index_].time_;
305 2 heap_[impl.heap_index_].time_ = new_time;
306
307
2/3
✓ Branch 1 taken 2 times.
✓ Branch 4 taken 2 times.
✗ Branch 5 not taken.
2 if (new_time < old_time)
308
1/1
✓ Branch 1 taken 2 times.
2 up_heap(impl.heap_index_);
309 else
310 down_heap(impl.heap_index_);
311
312 2 notify = (impl.heap_index_ == 0);
313 }
314
315 2 refresh_cached_nearest();
316 2 }
317
318
1/2
✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
2 if (was_waiting)
319 {
320 2 impl.ec_value_ = make_error_code(capy::error::canceled);
321 2 sched_->post(&impl.op_);
322 }
323
324
1/2
✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
2 if (notify)
325 2 on_earliest_changed_();
326 }
327
328 // Called from wait() when timer hasn't been inserted into the heap yet
329 7689 void insert_timer(timer_impl& impl)
330 {
331 7689 bool notify = false;
332 {
333
1/1
✓ Branch 1 taken 7689 times.
7689 std::lock_guard lock(mutex_);
334 7689 impl.heap_index_ = heap_.size();
335
1/1
✓ Branch 1 taken 7689 times.
7689 heap_.push_back({impl.expiry_, &impl});
336
1/1
✓ Branch 2 taken 7689 times.
7689 up_heap(heap_.size() - 1);
337 7689 notify = (impl.heap_index_ == 0);
338 7689 refresh_cached_nearest();
339 7689 }
340
2/2
✓ Branch 0 taken 7675 times.
✓ Branch 1 taken 14 times.
7689 if (notify)
341 7675 on_earliest_changed_();
342 7689 }
343
344 14 void cancel_timer(timer_impl& impl)
345 {
346
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 4 times.
14 if (!impl.might_have_pending_waits_)
347 10 return;
348
349 // Not in heap and not waiting — just clear the flag
350 4 if (impl.heap_index_ == (std::numeric_limits<std::size_t>::max)()
351
2/6
✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 4 times.
4 && !impl.waiting_)
352 {
353 impl.might_have_pending_waits_ = false;
354 return;
355 }
356
357 4 bool was_waiting = false;
358
359 {
360
1/1
✓ Branch 1 taken 4 times.
4 std::lock_guard lock(mutex_);
361
1/1
✓ Branch 1 taken 4 times.
4 remove_timer_impl(impl);
362
1/2
✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
4 if (impl.waiting_)
363 {
364 4 was_waiting = true;
365 4 impl.waiting_ = false;
366 }
367 4 refresh_cached_nearest();
368 4 }
369
370 4 impl.might_have_pending_waits_ = false;
371
372
1/2
✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
4 if (was_waiting)
373 {
374 4 impl.ec_value_ = make_error_code(capy::error::canceled);
375 4 sched_->post(&impl.op_);
376 }
377 }
378
379 bool empty() const noexcept override
380 {
381 return cached_nearest_ns_.load(std::memory_order_acquire)
382 == (std::numeric_limits<std::int64_t>::max)();
383 }
384
385 18047 time_point nearest_expiry() const noexcept override
386 {
387 18047 auto ns = cached_nearest_ns_.load(std::memory_order_acquire);
388 18047 return time_point(time_point::duration(ns));
389 }
390
391 135283 std::size_t process_expired() override
392 {
393 135283 std::vector<timer_impl*> expired;
394
395 {
396
1/1
✓ Branch 1 taken 135283 times.
135283 std::lock_guard lock(mutex_);
397 135283 auto now = clock_type::now();
398
399
7/7
✓ Branch 1 taken 142628 times.
✓ Branch 2 taken 338 times.
✓ Branch 5 taken 142628 times.
✓ Branch 8 taken 7683 times.
✓ Branch 9 taken 134945 times.
✓ Branch 10 taken 7683 times.
✓ Branch 11 taken 135283 times.
142966 while (!heap_.empty() && heap_[0].time_ <= now)
400 {
401 7683 timer_impl* t = heap_[0].timer_;
402
1/1
✓ Branch 1 taken 7683 times.
7683 remove_timer_impl(*t);
403
404
1/2
✓ Branch 0 taken 7683 times.
✗ Branch 1 not taken.
7683 if (t->waiting_)
405 {
406 7683 t->waiting_ = false;
407 7683 t->ec_value_ = {};
408
1/1
✓ Branch 1 taken 7683 times.
7683 expired.push_back(t);
409 }
410 }
411
412 135283 refresh_cached_nearest();
413 135283 }
414
415
2/2
✓ Branch 5 taken 7683 times.
✓ Branch 6 taken 135283 times.
142966 for (auto* t : expired)
416
1/1
✓ Branch 1 taken 7683 times.
7683 sched_->post(&t->op_);
417
418 270566 return expired.size();
419 135283 }
420
421 private:
422 142980 void refresh_cached_nearest() noexcept
423 {
424 142980 auto ns = heap_.empty()
425
2/2
✓ Branch 0 taken 344 times.
✓ Branch 1 taken 142636 times.
142980 ? (std::numeric_limits<std::int64_t>::max)()
426 142636 : heap_[0].time_.time_since_epoch().count();
427 142980 cached_nearest_ns_.store(ns, std::memory_order_release);
428 142980 }
429
430 7689 void remove_timer_impl(timer_impl& impl)
431 {
432 7689 std::size_t index = impl.heap_index_;
433
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 7689 times.
7689 if (index >= heap_.size())
434 return; // Not in heap
435
436
2/2
✓ Branch 1 taken 81 times.
✓ Branch 2 taken 7608 times.
7689 if (index == heap_.size() - 1)
437 {
438 // Last element, just pop
439 81 impl.heap_index_ = (std::numeric_limits<std::size_t>::max)();
440 81 heap_.pop_back();
441 }
442 else
443 {
444 // Swap with last and reheapify
445 7608 swap_heap(index, heap_.size() - 1);
446 7608 impl.heap_index_ = (std::numeric_limits<std::size_t>::max)();
447 7608 heap_.pop_back();
448
449
2/6
✗ Branch 0 not taken.
✓ Branch 1 taken 7608 times.
✗ Branch 6 not taken.
✗ Branch 7 not taken.
✗ Branch 8 not taken.
✓ Branch 9 taken 7608 times.
7608 if (index > 0 && heap_[index].time_ < heap_[(index - 1) / 2].time_)
450 up_heap(index);
451 else
452 7608 down_heap(index);
453 }
454 }
455
456 7691 void up_heap(std::size_t index)
457 {
458
2/2
✓ Branch 0 taken 7608 times.
✓ Branch 1 taken 7677 times.
15285 while (index > 0)
459 {
460 7608 std::size_t parent = (index - 1) / 2;
461
2/2
✓ Branch 4 taken 14 times.
✓ Branch 5 taken 7594 times.
7608 if (!(heap_[index].time_ < heap_[parent].time_))
462 14 break;
463 7594 swap_heap(index, parent);
464 7594 index = parent;
465 }
466 7691 }
467
468 7608 void down_heap(std::size_t index)
469 {
470 7608 std::size_t child = index * 2 + 1;
471
2/2
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 7606 times.
7608 while (child < heap_.size())
472 {
473 2 std::size_t min_child = (child + 1 == heap_.size() ||
474 heap_[child].time_ < heap_[child + 1].time_)
475
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
2 ? child : child + 1;
476
477
1/2
✓ Branch 4 taken 2 times.
✗ Branch 5 not taken.
2 if (heap_[index].time_ < heap_[min_child].time_)
478 2 break;
479
480 swap_heap(index, min_child);
481 index = min_child;
482 child = index * 2 + 1;
483 }
484 7608 }
485
486 15202 void swap_heap(std::size_t i1, std::size_t i2)
487 {
488 15202 heap_entry tmp = heap_[i1];
489 15202 heap_[i1] = heap_[i2];
490 15202 heap_[i2] = tmp;
491 15202 heap_[i1].timer_->heap_index_ = i1;
492 15202 heap_[i2].timer_->heap_index_ = i2;
493 15202 }
494 };
495
496 void
497 timer_impl::completion_op::
498 do_complete(
499 void* owner,
500 scheduler_op* base,
501 std::uint32_t,
502 std::uint32_t)
503 {
504 if (!owner)
505 return;
506 static_cast<completion_op*>(base)->operator()();
507 }
508
509 void
510 7689 timer_impl::completion_op::
511 operator()()
512 {
513 7689 auto* impl = impl_;
514
1/2
✓ Branch 0 taken 7689 times.
✗ Branch 1 not taken.
7689 if (impl->ec_out_)
515 7689 *impl->ec_out_ = impl->ec_value_;
516
517 7689 auto& sched = impl->svc_->get_scheduler();
518 7689 impl->d_.post(impl->h_);
519 7689 sched.on_work_finished();
520 7689 }
521
522 void
523 7792 timer_impl::
524 release()
525 {
526 7792 svc_->destroy_impl(*this);
527 7792 }
528
529 std::coroutine_handle<>
530 7767 timer_impl::
531 wait(
532 std::coroutine_handle<> h,
533 capy::executor_ref d,
534 std::stop_token token,
535 std::error_code* ec)
536 {
537
1/2
✓ Branch 1 taken 7767 times.
✗ Branch 2 not taken.
7767 if (heap_index_ == (std::numeric_limits<std::size_t>::max)())
538 {
539
3/3
✓ Branch 2 taken 7767 times.
✓ Branch 5 taken 78 times.
✓ Branch 6 taken 7689 times.
7767 if (expiry_ <= clock_type::now())
540 {
541
1/2
✓ Branch 0 taken 78 times.
✗ Branch 1 not taken.
78 if (ec)
542 78 *ec = {};
543 78 d.post(h);
544 78 return std::noop_coroutine();
545 }
546
547 7689 svc_->insert_timer(*this);
548 }
549
550 7689 h_ = h;
551 7689 d_ = std::move(d);
552 7689 token_ = std::move(token);
553 7689 ec_out_ = ec;
554 7689 waiting_ = true;
555 7689 might_have_pending_waits_ = true;
556 7689 svc_->get_scheduler().on_work_started();
557 7689 return std::noop_coroutine();
558 }
559
560 // Extern free functions called from timer.cpp
561 //
562 // Both thread-local caches are invalidated by timer_service_invalidate_cache()
563 // during shutdown. The service cache avoids find_service overhead per
564 // timer construction. The impl cache avoids the free-list mutex for
565 // the common create-then-destroy-on-same-thread pattern.
566 static thread_local capy::execution_context* cached_ctx = nullptr;
567 static thread_local timer_service_impl* cached_svc = nullptr;
568
569 // RAII wrapper deletes the cached impl when the thread exits
570 struct tl_impl_cache
571 {
572 timer_impl* ptr = nullptr;
573
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 10 times.
10 ~tl_impl_cache() { delete ptr; }
574 };
575 static thread_local tl_impl_cache tl_cached_impl;
576
577 timer_impl*
578 7792 try_pop_tl_cache(timer_service_impl* svc) noexcept
579 {
580
5/6
✓ Branch 1 taken 7663 times.
✓ Branch 2 taken 129 times.
✓ Branch 4 taken 7663 times.
✗ Branch 5 not taken.
✓ Branch 6 taken 7663 times.
✓ Branch 7 taken 129 times.
7792 if (tl_cached_impl.ptr && tl_cached_impl.ptr->svc_ == svc)
581 {
582 7663 auto* impl = tl_cached_impl.ptr;
583 7663 tl_cached_impl.ptr = nullptr;
584 7663 return impl;
585 }
586 129 return nullptr;
587 }
588
589 bool
590 7792 try_push_tl_cache(timer_impl* impl) noexcept
591 {
592
2/2
✓ Branch 1 taken 7762 times.
✓ Branch 2 taken 30 times.
7792 if (!tl_cached_impl.ptr)
593 {
594 7762 tl_cached_impl.ptr = impl;
595 7762 return true;
596 }
597 30 return false;
598 }
599
600 void
601 309 timer_service_invalidate_cache() noexcept
602 {
603 309 cached_ctx = nullptr;
604 309 cached_svc = nullptr;
605
2/2
✓ Branch 1 taken 99 times.
✓ Branch 2 taken 210 times.
309 delete tl_cached_impl.ptr;
606 309 tl_cached_impl.ptr = nullptr;
607 309 }
608
609 timer::timer_impl*
610 7792 timer_service_create(capy::execution_context& ctx)
611 {
612
2/2
✓ Branch 0 taken 101 times.
✓ Branch 1 taken 7691 times.
7792 if (cached_ctx != &ctx)
613 {
614 101 cached_svc = static_cast<timer_service_impl*>(
615 101 ctx.find_service<timer_service>());
616
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 101 times.
101 if (!cached_svc)
617 throw std::runtime_error("timer_service not found");
618 101 cached_ctx = &ctx;
619 }
620 7792 return cached_svc->create_impl();
621 }
622
623 void
624 7792 timer_service_destroy(timer::timer_impl& base) noexcept
625 {
626 7792 static_cast<timer_impl&>(base).release();
627 7792 }
628
629 timer::time_point
630 28 timer_service_expiry(timer::timer_impl& base) noexcept
631 {
632 28 return static_cast<timer_impl&>(base).expiry_;
633 }
634
635 void
636 14 timer_service_expires_at(timer::timer_impl& base, timer::time_point t)
637 {
638 14 auto& impl = static_cast<timer_impl&>(base);
639 14 impl.expiry_ = t;
640 14 impl.svc_->update_timer(impl, t);
641 14 }
642
643 void
644 7781 timer_service_expires_after(timer::timer_impl& base, timer::duration d)
645 {
646 7781 auto& impl = static_cast<timer_impl&>(base);
647
1/1
✓ Branch 2 taken 7781 times.
7781 impl.expiry_ = timer::clock_type::now() + d;
648 7781 impl.svc_->update_timer(impl, impl.expiry_);
649 7781 }
650
651 void
652 14 timer_service_cancel(timer::timer_impl& base) noexcept
653 {
654 14 auto& impl = static_cast<timer_impl&>(base);
655 14 impl.svc_->cancel_timer(impl);
656 14 }
657
658 timer_service&
659 309 get_timer_service(capy::execution_context& ctx, scheduler& sched)
660 {
661 309 return ctx.make_service<timer_service_impl>(sched);
662 }
663
664 } // namespace boost::corosio::detail
665
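
The design comment at the top of the file notes that the scheduler queries nearest_expiry() to set its epoll/timerfd timeout. As a rough illustration of how a reactor loop might consume this service (this is not the actual corosio scheduler): empty(), nearest_expiry(), and process_expired() are the real interface from this file, while run_loop, wait_for_events, and the millisecond conversion are assumptions made for the sketch.

    #include <chrono>
    // #include "src/detail/timer_service.hpp"   // for timer_service, as included above

    void run_loop(boost::corosio::detail::timer_service& timers)
    {
        for (;;)
        {
            int timeout_ms = -1;                   // block indefinitely when no timers are armed
            if (!timers.empty())
            {
                auto now = std::chrono::steady_clock::now();
                auto dt  = timers.nearest_expiry() - now;
                timeout_ms = dt <= std::chrono::nanoseconds(0)
                    ? 0                            // nearest timer already due: poll and return
                    : static_cast<int>(
                          std::chrono::ceil<std::chrono::milliseconds>(dt).count());
            }
            wait_for_events(timeout_ms);           // hypothetical stand-in for epoll_wait / timerfd
            timers.process_expired();              // posts completion ops for timers that fired
        }
    }

Because nearest_expiry() and empty() read the cached atomic, this polling pattern never takes the service mutex on the idle path; only process_expired() locks, and only to pop due entries from the heap.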