From 2b1f39cdc0fd8325343db533614188b1e7b3308c Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 09:38:56 -1000 Subject: [PATCH 01/79] feat: significantly improve efficiency of the ServiceBrowser scheduler fixes #821 --- src/zeroconf/_services/browser.pxd | 28 +++- src/zeroconf/_services/browser.py | 226 +++++++++++++++++------------ src/zeroconf/const.py | 1 - 3 files changed, 151 insertions(+), 104 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index a1d79b08..2a1cc1fa 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -17,6 +17,13 @@ cdef cython.uint _TYPE_PTR cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES +cdef class _ScheduledQuery: + + cdef public str name + cdef public str type_ + cdef public bint cancelled + cdef public float when + cdef class _DNSPointerOutgoingBucket: cdef public object now @@ -29,7 +36,7 @@ cdef class _DNSPointerOutgoingBucket: cpdef generate_service_query( object zc, double now, - list type_, + set types_, bint multicast, object question_type ) @@ -39,16 +46,21 @@ cdef _group_ptr_queries_with_known_answers(object now, object multicast, cython. cdef class QueryScheduler: - cdef cython.set _types - cdef cython.dict _next_time + cdef _ServiceBrowserBase _browser cdef object _first_random_delay_interval - cdef cython.dict _delay + cdef object _min_time_between_queries + cdef object _loop + cdef unsigned int _startup_queries_sent + cdef dict _scheduled + cdef list _query_heap + cdef object _next_run - cpdef millis_to_wait(self, object now) + cpdef schedule(self, DNSPointer pointer) - cpdef reschedule_type(self, object type_, object next_time) + cpdef cancel(self, DNSPointer pointer) + + cpdef reschedule(self, DNSPointer pointer) - cpdef process_ready_types(self, object now) cdef class _ServiceBrowserBase(RecordUpdateListener): @@ -90,3 +102,5 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef _cancel_send_timer(self) cpdef async_update_records_complete(self) + + cpdef async_send_ready_queries(self, first_request: bint ,double now, set ready_types) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index ca8c9aa5..874a4c39 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -21,6 +21,7 @@ """ import asyncio +import heapq import queue import random import threading @@ -29,6 +30,7 @@ from types import TracebackType # noqa # used in type hints from typing import ( TYPE_CHECKING, + Any, Callable, Dict, Iterable, @@ -56,7 +58,6 @@ from .._utils.time import current_time_millis, millis_to_seconds from ..const import ( _ADDRESS_RECORD_TYPES, - _BROWSER_BACKOFF_LIMIT, _BROWSER_TIME, _CLASS_IN, _DNS_PACKET_HEADER_LEN, @@ -82,6 +83,8 @@ SERVICE_STATE_CHANGE_REMOVED = ServiceStateChange.Removed SERVICE_STATE_CHANGE_UPDATED = ServiceStateChange.Updated +STARTUP_QUERIES = 3 + if TYPE_CHECKING: from .._core import Zeroconf @@ -93,6 +96,32 @@ _QuestionWithKnownAnswers = Dict[DNSQuestion, Set[DNSPointer]] +class _ScheduledQuery: + + __slots__ = ('name', 'type_', 'cancelled', 'when') + + def __init__(self, name: str, type_: str, when: float) -> None: + """Create a scheduled query.""" + self.name = name + self.type_ = type_ + self.cancelled = False + self.when = when + + def __lt__(self, other: '_ScheduledQuery') -> bool: + """Compare two scheduled queries.""" + return self.when < other.when + + def 
__gt__(self, other: '_ScheduledQuery') -> bool: + """Compare two scheduled queries.""" + return self.when > other.when + + def __eq__(self, other: Any) -> bool: + """Compare two scheduled queries.""" + if not isinstance(other, _ScheduledQuery): + return NotImplemented + return self.when == other.when + + class _DNSPointerOutgoingBucket: """A DNSOutgoing bucket.""" @@ -164,7 +193,7 @@ def _group_ptr_queries_with_known_answers( def generate_service_query( - zc: 'Zeroconf', now: float_, types_: List[str], multicast: bool, question_type: Optional[DNSQuestionType] + zc: 'Zeroconf', now: float_, types_: set[str], multicast: bool, question_type: Optional[DNSQuestionType] ) -> List[DNSOutgoing]: """Generate a service query for sending with zeroconf.send.""" questions_with_known_answers: _QuestionWithKnownAnswers = {} @@ -223,25 +252,34 @@ class QueryScheduler: """ - __slots__ = ('_types', '_next_time', '_first_random_delay_interval', '_delay') + __slots__ = ( + '_browser', + '_first_random_delay_interval', + '_min_time_between_queries', + '_loop', + '_startup_queries_sent', + '_scheduled', + '_query_heap', + '_next_run', + ) def __init__( self, - types: Set[str], + browser: "_ServiceBrowserBase", delay: int, first_random_delay_interval: Tuple[int, int], ) -> None: - self._types = types - self._next_time: Dict[str, float] = {} + self._browser = browser self._first_random_delay_interval = first_random_delay_interval - self._delay: Dict[str, float] = {check_type_: delay for check_type_ in self._types} - - def start(self, now: float_) -> None: - """Start the scheduler.""" - self._generate_first_next_time(now) + self._min_time_between_queries = millis_to_seconds(delay) + self._loop = asyncio.get_running_loop() + self._startup_queries_sent = 0 + self._scheduled: Dict[str, _ScheduledQuery] = {} + self._query_heap: list[_ScheduledQuery] = [] + self._next_run: Optional[asyncio.TimerHandle] = None - def _generate_first_next_time(self, now: float_) -> None: - """Generate the initial next query times. + def start(self) -> None: + """Start the scheduler. https://datatracker.ietf.org/doc/html/rfc6762#section-5.2 To avoid accidental synchronization when, for some reason, multiple @@ -250,43 +288,75 @@ def _generate_first_next_time(self, now: float_) -> None: also delay the first query of the series by a randomly chosen amount in the range 20-120 ms. 
""" - delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) - next_time = now + delay - self._next_time = {check_type_: next_time for check_type_ in self._types} - - def millis_to_wait(self, now: float_) -> float: - """Returns the number of milliseconds to wait for the next event.""" - # Wait for the type has the smallest next time - next_time = min(self._next_time.values()) - return 0 if next_time <= now else next_time - now - - def reschedule_type(self, type_: str_, next_time: float_) -> bool: - """Reschedule the query for a type to happen sooner.""" - if next_time >= self._next_time[type_]: - return False - self._next_time[type_] = next_time - return True - - def _force_reschedule_type(self, type_: str_, next_time: float_) -> None: - """Force a reschedule of a type.""" - self._next_time[type_] = next_time - - def process_ready_types(self, now: float_) -> List[str]: + start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) + self._loop.call_later(start_delay, self._process_ready_types) + + def stop(self) -> None: + """Stop the scheduler.""" + if self._next_run: + self._next_run.cancel() + self._next_run = None + + def schedule(self, pointer: DNSPointer) -> None: + """Schedule a query for a pointer.""" + expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) + scheduled_query = _ScheduledQuery(pointer.alias, pointer.name, expire_time) + self._scheduled[pointer.name] = scheduled_query + heapq.heappush(self._query_heap, scheduled_query) + + def cancel(self, pointer: DNSPointer) -> None: + """Cancel a query for a pointer.""" + self._scheduled[pointer.name].cancelled = True + + def reschedule(self, pointer: DNSPointer) -> None: + """Reschedule a query for a pointer.""" + self.cancel(pointer) + self.schedule(pointer) + + def _process_ready_types(self) -> None: """Generate a list of ready types that is due and schedule the next time.""" - if self.millis_to_wait(now): - return [] + now = current_time_millis() - ready_types: List[str] = [] + if self._startup_queries_sent < STARTUP_QUERIES: + # At first we will send 3 queries to get the cache populated + first_request = self._startup_queries_sent == 0 + self._startup_queries_sent += 1 + self._browser.async_send_ready_queries(first_request, now, self._browser.types) + self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_ready_types) + return - for type_, due in self._next_time.items(): - if due > now: - continue + # Once we have sent the initial queries we will send queries + # only when we need to refresh records that are about to expire (aka + # _EXPIRE_REFRESH_TIME_PERCENT which is currently 75% of the TTL) and + # with a minimum time between queries of _min_time_between_queries + # which defaults to 1s + + # Remove cancelled from head of queue. 
+ while self._query_heap and self._query_heap[0].cancelled: + heapq.heappop(self._query_heap) + + ready_types: set[str] = set() + + while self._query_heap: + query = self._query_heap[0] + if query.when >= now: + break + heapq.heappop(self._query_heap) + del self._scheduled[query.name] + ready_types.add(query.type_) + + if ready_types: + self._browser.async_send_ready_queries(False, now, ready_types) - ready_types.append(type_) - self._next_time[type_] = now + self._delay[type_] - self._delay[type_] = min(_BROWSER_BACKOFF_LIMIT * 1000, self._delay[type_] * 2) + next_scheduled = self._query_heap[0].when if self._query_heap else 0 + next_time = now + self._min_time_between_queries - return ready_types + if next_scheduled > next_time: + next_when = next_scheduled + else: + next_when = next_time + + self._next_run = self._loop.call_at(next_when, self._process_ready_types) class _ServiceBrowserBase(RecordUpdateListener): @@ -353,7 +423,7 @@ def __init__( self.question_type = question_type self._pending_handlers: Dict[Tuple[str, str], ServiceStateChange] = {} self._service_state_changed = Signal() - self.query_scheduler = QueryScheduler(self.types, delay, _FIRST_QUERY_DELAY_RANDOM_INTERVAL) + self.query_scheduler = QueryScheduler(self, delay, _FIRST_QUERY_DELAY_RANDOM_INTERVAL) self.done = False self._first_request: bool = True self._next_send_timer: Optional[asyncio.TimerHandle] = None @@ -377,7 +447,6 @@ def _async_start(self) -> None: Must be called by uses of this base class after they have finished setting their properties. """ - self.query_scheduler.start(current_time_millis()) self.zc.async_add_listener(self, [DNSQuestion(type_, _TYPE_PTR, _CLASS_IN) for type_ in self.types]) # Only start queries after the listener is installed self._query_sender_task = asyncio.ensure_future(self._async_start_query_sender()) @@ -432,11 +501,12 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record for type_ in self.types.intersection(cached_possible_types(pointer.name)): if old_record is None: self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias) + self.query_scheduler.schedule(pointer) elif pointer.is_expired(now): self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias) + self.query_scheduler.cancel(pointer) else: - expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) - self.reschedule_type(type_, now, expire_time) + self.query_scheduler.reschedule(pointer) continue # If its expired or already exists in the cache it cannot be updated. 
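The scheduler introduced above keeps one pending entry per service name in a dict and pushes the same entries onto a heap ordered by expiration time; cancelling only flags the entry, and flagged entries are discarded lazily once they reach the heap head. A minimal, self-contained sketch of that heap-with-lazy-cancellation pattern follows — the names here are illustrative only, not the zeroconf classes:

    import heapq
    from typing import Dict, List


    class _Entry:
        __slots__ = ('key', 'when', 'cancelled')

        def __init__(self, key: str, when: float) -> None:
            self.key = key
            self.when = when          # absolute due time (e.g. milliseconds)
            self.cancelled = False

        def __lt__(self, other: '_Entry') -> bool:
            return self.when < other.when


    class LazyHeapScheduler:
        """One pending entry per key; cancelled entries are skipped lazily."""

        def __init__(self) -> None:
            self._heap: List[_Entry] = []
            self._by_key: Dict[str, _Entry] = {}

        def schedule(self, key: str, when: float) -> None:
            entry = _Entry(key, when)
            self._by_key[key] = entry
            heapq.heappush(self._heap, entry)

        def cancel(self, key: str) -> None:
            entry = self._by_key.pop(key, None)
            if entry is not None:
                # Lazy deletion: the heap is not re-ordered, the entry is only flagged.
                entry.cancelled = True

        def reschedule(self, key: str, when: float) -> None:
            self.cancel(key)
            self.schedule(key, when)

        def pop_due(self, now: float) -> List[str]:
            """Return every key that is due, dropping cancelled entries on the way."""
            due: List[str] = []
            while self._heap:
                entry = self._heap[0]
                if entry.cancelled:
                    heapq.heappop(self._heap)
                    continue
                if entry.when > now:
                    break
                heapq.heappop(self._heap)
                del self._by_key[entry.key]
                due.append(entry.key)
            return due

With this shape a cancel or reschedule costs O(1) at call time and is paid for with a single extra heappop later, instead of re-sorting or rebuilding the heap.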
@@ -487,68 +557,32 @@ def _fire_service_state_changed_event(self, event: Tuple[Tuple[str, str], Servic def _async_cancel(self) -> None: """Cancel the browser.""" self.done = True - self._cancel_send_timer() + self.query_scheduler.stop() self.zc.async_remove_listener(self) assert self._query_sender_task is not None, "Attempted to cancel a browser that was not started" self._query_sender_task.cancel() - def _generate_ready_queries(self, first_request: bool_, now: float_) -> List[DNSOutgoing]: - """Generate the service browser query for any type that is due.""" - ready_types = self.query_scheduler.process_ready_types(now) - if not ready_types: - return [] + async def _async_start_query_sender(self) -> None: + """Start scheduling queries.""" + if not self.zc.started: + await self.zc.async_wait_for_start() + self.query_scheduler.start() + def async_send_ready_queries(self, first_request: bool, now: float_, ready_types: set[str]) -> None: + """Send any ready queries.""" + if self.done or self.zc.done: + return # If they did not specify and this is the first request, ask QU questions # https://datatracker.ietf.org/doc/html/rfc6762#section-5.4 since we are # just starting up and we know our cache is likely empty. This ensures # the next outgoing will be sent with the known answers list. question_type = DNSQuestionType.QU if not self.question_type and first_request else self.question_type - return generate_service_query(self.zc, now, ready_types, self.multicast, question_type) - - async def _async_start_query_sender(self) -> None: - """Start scheduling queries.""" - if not self.zc.started: - await self.zc.async_wait_for_start() - self._async_send_ready_queries_schedule_next() - - def _cancel_send_timer(self) -> None: - """Cancel the next send.""" - if self._next_send_timer: - self._next_send_timer.cancel() - self._next_send_timer = None - - def reschedule_type(self, type_: str_, now: float_, next_time: float_) -> None: - """Reschedule a type to be refreshed in the future.""" - if self.query_scheduler.reschedule_type(type_, next_time): - # We need to send the queries before rescheduling the next one - # otherwise we may be scheduling a query to go out in the next - # iteration of the event loop which should be sent now. - if now >= next_time: - self._async_send_ready_queries(now) - self._cancel_send_timer() - self._async_schedule_next(now) - - def _async_send_ready_queries(self, now: float_) -> None: - """Send any ready queries.""" - outs = self._generate_ready_queries(self._first_request, now) + outs = generate_service_query(self.zc, now, ready_types, self.multicast, question_type) if outs: self._first_request = False for out in outs: self.zc.async_send(out, addr=self.addr, port=self.port) - def _async_send_ready_queries_schedule_next(self) -> None: - """Send ready queries and schedule next one checking for done first.""" - if self.done or self.zc.done: - return - now = current_time_millis() - self._async_send_ready_queries(now) - self._async_schedule_next(now) - - def _async_schedule_next(self, now: float_) -> None: - """Scheule the next time.""" - delay = millis_to_seconds(self.query_scheduler.millis_to_wait(now)) - self._next_send_timer = self._loop.call_later(delay, self._async_send_ready_queries_schedule_next) - class ServiceBrowser(_ServiceBrowserBase, threading.Thread): """Used to browse for a service of a specific type. 
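For contrast, the per-type backoff being removed (see the _BROWSER_BACKOFF_LIMIT deletion in const.py just below) doubled each type's delay after every query up to a one-hour cap. A small sketch of that old schedule, using the constants from this patch:

    # The removed strategy: the delay starts at _BROWSER_TIME and doubles per
    # query, capped at _BROWSER_BACKOFF_LIMIT (values as defined in const.py).
    _BROWSER_TIME = 1000           # ms
    _BROWSER_BACKOFF_LIMIT = 3600  # s

    delay_ms = float(_BROWSER_TIME)
    schedule_ms = []
    for _ in range(14):
        schedule_ms.append(delay_ms)
        delay_ms = min(_BROWSER_BACKOFF_LIMIT * 1000, delay_ms * 2)
    # schedule_ms -> 1000, 2000, 4000, ..., 2048000, then pinned at 3600000 (one hour)

The new scheduler instead derives each query time from the record's expiration (at _EXPIRE_REFRESH_TIME_PERCENT, currently 75% of the TTL) with a floor of the minimum time between queries, so idle types stop generating periodic traffic without needing a backoff table at all.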
diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index ca199df5..47bd6a33 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -32,7 +32,6 @@ _BROWSER_TIME = 1000 # ms _DUPLICATE_QUESTION_INTERVAL = _BROWSER_TIME - 1 # ms _DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 -_BROWSER_BACKOFF_LIMIT = 3600 # s _CACHE_CLEANUP_INTERVAL = 10 # s _LOADED_SYSTEM_TIMEOUT = 10 # s _STARTUP_TIMEOUT = 9 # s must be lower than _LOADED_SYSTEM_TIMEOUT From d8a3a554d1e9b73b211da2c7d052e451aefbede9 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 09:47:31 -1000 Subject: [PATCH 02/79] feat: significantly improve efficiency of the ServiceBrowser scheduler fixes #821 --- src/zeroconf/_services/browser.pxd | 20 ++++++-------------- src/zeroconf/_services/browser.py | 21 ++++++++++++++------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 2a1cc1fa..16930227 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -17,6 +17,8 @@ cdef cython.uint _TYPE_PTR cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES +cdef object heappop, heappush + cdef class _ScheduledQuery: cdef public str name @@ -47,8 +49,8 @@ cdef _group_ptr_queries_with_known_answers(object now, object multicast, cython. cdef class QueryScheduler: cdef _ServiceBrowserBase _browser - cdef object _first_random_delay_interval - cdef object _min_time_between_queries + cdef double _first_random_delay_interval + cdef double _min_time_between_queries cdef object _loop cdef unsigned int _startup_queries_sent cdef dict _scheduled @@ -61,6 +63,8 @@ cdef class QueryScheduler: cpdef reschedule(self, DNSPointer pointer) + @cython.locals(query=_ScheduledQuery, next_scheduled=_ScheduledQuery, next_when=double) + cpdef _process_ready_types(self) cdef class _ServiceBrowserBase(RecordUpdateListener): @@ -80,8 +84,6 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cdef public object _next_send_timer cdef public object _query_sender_task - cpdef _generate_ready_queries(self, object first_request, object now) - cpdef _enqueue_callback(self, object state_change, object type_, object name) @cython.locals(record_update=RecordUpdate, record=DNSRecord, cache=DNSCache, service=DNSRecord, pointer=DNSPointer) @@ -89,18 +91,8 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef cython.list _names_matching_types(self, object types) - cpdef reschedule_type(self, object type_, object now, object next_time) - cpdef _fire_service_state_changed_event(self, cython.tuple event) - cpdef _async_send_ready_queries_schedule_next(self) - - cpdef _async_schedule_next(self, object now) - - cpdef _async_send_ready_queries(self, object now) - - cpdef _cancel_send_timer(self) - cpdef async_update_records_complete(self) cpdef async_send_ready_queries(self, first_request: bint ,double now, set ready_types) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 874a4c39..c93a5b97 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -95,6 +95,9 @@ _QuestionWithKnownAnswers = Dict[DNSQuestion, Set[DNSPointer]] +heappop = heapq.heappop +heappush = heapq.heappush + class _ScheduledQuery: @@ -302,7 +305,7 @@ def schedule(self, pointer: DNSPointer) -> None: expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) scheduled_query = 
_ScheduledQuery(pointer.alias, pointer.name, expire_time) self._scheduled[pointer.name] = scheduled_query - heapq.heappush(self._query_heap, scheduled_query) + heappush(self._query_heap, scheduled_query) def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" @@ -332,27 +335,31 @@ def _process_ready_types(self) -> None: # which defaults to 1s # Remove cancelled from head of queue. - while self._query_heap and self._query_heap[0].cancelled: - heapq.heappop(self._query_heap) + while self._query_heap: + query = self._query_heap[0] + if not query.cancelled: + break + heappop(self._query_heap) ready_types: set[str] = set() + next_scheduled: _ScheduledQuery | None = None while self._query_heap: query = self._query_heap[0] if query.when >= now: + next_scheduled = query break - heapq.heappop(self._query_heap) + heappop(self._query_heap) del self._scheduled[query.name] ready_types.add(query.type_) if ready_types: self._browser.async_send_ready_queries(False, now, ready_types) - next_scheduled = self._query_heap[0].when if self._query_heap else 0 next_time = now + self._min_time_between_queries - if next_scheduled > next_time: - next_when = next_scheduled + if next_scheduled and next_scheduled.when > next_time: + next_when = next_scheduled.when else: next_when = next_time From ba6ee705be59e4ff5a7f103fd66052f0b40ef48f Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 09:50:00 -1000 Subject: [PATCH 03/79] fix: py3.8 still supported --- src/zeroconf/_services/browser.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index c93a5b97..89d092e9 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -196,7 +196,7 @@ def _group_ptr_queries_with_known_answers( def generate_service_query( - zc: 'Zeroconf', now: float_, types_: set[str], multicast: bool, question_type: Optional[DNSQuestionType] + zc: 'Zeroconf', now: float_, types_: Set[str], multicast: bool, question_type: Optional[DNSQuestionType] ) -> List[DNSOutgoing]: """Generate a service query for sending with zeroconf.send.""" questions_with_known_answers: _QuestionWithKnownAnswers = {} @@ -341,8 +341,8 @@ def _process_ready_types(self) -> None: break heappop(self._query_heap) - ready_types: set[str] = set() - next_scheduled: _ScheduledQuery | None = None + ready_types: Set[str] = set() + next_scheduled: Optional[_ScheduledQuery] = None while self._query_heap: query = self._query_heap[0] @@ -575,7 +575,7 @@ async def _async_start_query_sender(self) -> None: await self.zc.async_wait_for_start() self.query_scheduler.start() - def async_send_ready_queries(self, first_request: bool, now: float_, ready_types: set[str]) -> None: + def async_send_ready_queries(self, first_request: bool, now: float_, ready_types: Set[str]) -> None: """Send any ready queries.""" if self.done or self.zc.done: return From e4f5f2054898a865b26d128a630994163c88be24 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 09:51:06 -1000 Subject: [PATCH 04/79] fix: py3.8 still supported --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 16930227..e11e7313 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -49,7 +49,7 @@ cdef _group_ptr_queries_with_known_answers(object now, object multicast, cython. 
cdef class QueryScheduler: cdef _ServiceBrowserBase _browser - cdef double _first_random_delay_interval + cdef tuple _first_random_delay_interval cdef double _min_time_between_queries cdef object _loop cdef unsigned int _startup_queries_sent From 26fbe21b52fd7edaf7a3037b21d5eb6db675662c Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 09:54:18 -1000 Subject: [PATCH 05/79] fix: py3.8 still supported --- src/zeroconf/_services/browser.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 89d092e9..1ba33f91 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -275,13 +275,13 @@ def __init__( self._browser = browser self._first_random_delay_interval = first_random_delay_interval self._min_time_between_queries = millis_to_seconds(delay) - self._loop = asyncio.get_running_loop() + self._loop: Optional[asyncio.AbstractEventLoop] = None self._startup_queries_sent = 0 self._scheduled: Dict[str, _ScheduledQuery] = {} self._query_heap: list[_ScheduledQuery] = [] self._next_run: Optional[asyncio.TimerHandle] = None - def start(self) -> None: + def start(self, loop: asyncio.AbstractEventLoop) -> None: """Start the scheduler. https://datatracker.ietf.org/doc/html/rfc6762#section-5.2 @@ -292,7 +292,8 @@ def start(self) -> None: in the range 20-120 ms. """ start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) - self._loop.call_later(start_delay, self._process_ready_types) + self._loop = loop + loop.call_later(start_delay, self._process_ready_types) def stop(self) -> None: """Stop the scheduler.""" @@ -318,6 +319,8 @@ def reschedule(self, pointer: DNSPointer) -> None: def _process_ready_types(self) -> None: """Generate a list of ready types that is due and schedule the next time.""" + if TYPE_CHECKING: + assert self._loop is not None now = current_time_millis() if self._startup_queries_sent < STARTUP_QUERIES: @@ -573,7 +576,7 @@ async def _async_start_query_sender(self) -> None: """Start scheduling queries.""" if not self.zc.started: await self.zc.async_wait_for_start() - self.query_scheduler.start() + self.query_scheduler.start(self._loop) def async_send_ready_queries(self, first_request: bool, now: float_, ready_types: Set[str]) -> None: """Send any ready queries.""" From 675fdf78f673d510b0db88a0ca56a16141a4ff0d Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 10:08:14 -1000 Subject: [PATCH 06/79] fix: debug --- examples/browser.py | 2 +- src/zeroconf/_services/browser.pxd | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/browser.py b/examples/browser.py index a456a9eb..237de013 100755 --- a/examples/browser.py +++ b/examples/browser.py @@ -66,7 +66,7 @@ def on_service_state_change( zeroconf = Zeroconf(ip_version=ip_version) - services = ["_http._tcp.local.", "_hap._tcp.local."] + services = ["_http._tcp.local.", "_hap._tcp.local.", "_esphomelib._tcp.local.", "_airplay._tcp.local."] if args.find: services = list(ZeroconfServiceTypes.find(zc=zeroconf)) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index e11e7313..e322cc83 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -61,6 +61,7 @@ cdef class QueryScheduler: cpdef cancel(self, DNSPointer pointer) + @cython.locals(current=_ScheduledQuery) cpdef reschedule(self, DNSPointer pointer) @cython.locals(query=_ScheduledQuery, next_scheduled=_ScheduledQuery, next_when=double) From 2f79898ffe5aa62cd58ddcd285ea6e495876851e Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 10:18:43 -1000 Subject: [PATCH 07/79] fix: time --- src/zeroconf/_services/browser.pxd | 8 +++--- src/zeroconf/_services/browser.py | 45 +++++++++++++++++------------- src/zeroconf/_utils/time.pxd | 2 +- src/zeroconf/_utils/time.py | 6 ++-- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index e322cc83..734fbaac 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -28,7 +28,7 @@ cdef class _ScheduledQuery: cdef class _DNSPointerOutgoingBucket: - cdef public object now + cdef public double now_millis cdef public DNSOutgoing out cdef public cython.uint bytes @@ -37,14 +37,14 @@ cdef class _DNSPointerOutgoingBucket: @cython.locals(cache=DNSCache, question_history=QuestionHistory, record=DNSRecord, qu_question=bint) cpdef generate_service_query( object zc, - double now, + double now_millis, set types_, bint multicast, object question_type ) @cython.locals(answer=DNSPointer, query_buckets=list, question=DNSQuestion, max_compressed_size=cython.uint, max_bucket_size=cython.uint, query_bucket=_DNSPointerOutgoingBucket) -cdef _group_ptr_queries_with_known_answers(object now, object multicast, cython.dict question_with_known_answers) +cdef _group_ptr_queries_with_known_answers(double now_millis, object multicast, cython.dict question_with_known_answers) cdef class QueryScheduler: @@ -96,4 +96,4 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef async_update_records_complete(self) - cpdef async_send_ready_queries(self, first_request: bint ,double now, set ready_types) + cpdef async_send_ready_queries(self, first_request: bint, double now_millis, set ready_types) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 1ba33f91..2fde3575 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -128,11 +128,11 @@ def __eq__(self, other: Any) -> bool: class _DNSPointerOutgoingBucket: """A DNSOutgoing bucket.""" - __slots__ = ('now', 'out', 'bytes') + __slots__ = ('now_millis', 'out', 'bytes') - def __init__(self, now: float, multicast: bool) -> None: - """Create a bucke to wrap a DNSOutgoing.""" - self.now = now + def __init__(self, now_millis: float, multicast: bool) -> None: + """Create 
a bucket to wrap a DNSOutgoing.""" + self.now_millis = now_millis self.out = DNSOutgoing(_FLAGS_QR_QUERY, multicast=multicast) self.bytes = 0 @@ -140,7 +140,7 @@ def add(self, max_compressed_size: int_, question: DNSQuestion, answers: Set[DNS """Add a new set of questions and known answers to the outgoing.""" self.out.add_question(question) for answer in answers: - self.out.add_answer_at_time(answer, self.now) + self.out.add_answer_at_time(answer, self.now_millis) self.bytes += max_compressed_size @@ -159,7 +159,7 @@ def group_ptr_queries_with_known_answers( def _group_ptr_queries_with_known_answers( - now: float_, multicast: bool_, question_with_known_answers: _QuestionWithKnownAnswers + now_millis: float_, multicast: bool_, question_with_known_answers: _QuestionWithKnownAnswers ) -> List[DNSOutgoing]: """Inner wrapper for group_ptr_queries_with_known_answers.""" # This is the maximum size the query + known answers can be with name compression. @@ -188,7 +188,7 @@ def _group_ptr_queries_with_known_answers( # If a single question and known answers won't fit in a packet # we will end up generating multiple packets, but there will never # be multiple questions - query_bucket = _DNSPointerOutgoingBucket(now, multicast) + query_bucket = _DNSPointerOutgoingBucket(now_millis, multicast) query_bucket.add(max_compressed_size, question, answers) query_buckets.append(query_bucket) @@ -196,7 +196,11 @@ def _group_ptr_queries_with_known_answers( def generate_service_query( - zc: 'Zeroconf', now: float_, types_: Set[str], multicast: bool, question_type: Optional[DNSQuestionType] + zc: 'Zeroconf', + now_millis: float_, + types_: Set[str], + multicast: bool, + question_type: Optional[DNSQuestionType], ) -> List[DNSOutgoing]: """Generate a service query for sending with zeroconf.send.""" questions_with_known_answers: _QuestionWithKnownAnswers = {} @@ -209,9 +213,9 @@ def generate_service_query( known_answers = { record for record in cache.get_all_by_details(type_, _TYPE_PTR, _CLASS_IN) - if not record.is_stale(now) + if not record.is_stale(now_millis) } - if not qu_question and question_history.suppresses(question, now, known_answers): + if not qu_question and question_history.suppresses(question, now_millis, known_answers): log.debug("Asking %s was suppressed by the question history", question) continue if TYPE_CHECKING: @@ -220,9 +224,9 @@ def generate_service_query( pointer_known_answers = known_answers questions_with_known_answers[question] = pointer_known_answers if not qu_question: - question_history.add_question_at_time(question, now, known_answers) + question_history.add_question_at_time(question, now_millis, known_answers) - return _group_ptr_queries_with_known_answers(now, multicast, questions_with_known_answers) + return _group_ptr_queries_with_known_answers(now_millis, multicast, questions_with_known_answers) def _on_change_dispatcher( @@ -321,13 +325,14 @@ def _process_ready_types(self) -> None: """Generate a list of ready types that is due and schedule the next time.""" if TYPE_CHECKING: assert self._loop is not None - now = current_time_millis() + now_millis = current_time_millis() + now_seconds = millis_to_seconds(now_millis) if self._startup_queries_sent < STARTUP_QUERIES: # At first we will send 3 queries to get the cache populated first_request = self._startup_queries_sent == 0 + self._browser.async_send_ready_queries(first_request, now_millis, self._browser.types) self._startup_queries_sent += 1 - self._browser.async_send_ready_queries(first_request, now, self._browser.types) 
self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_ready_types) return @@ -349,7 +354,7 @@ def _process_ready_types(self) -> None: while self._query_heap: query = self._query_heap[0] - if query.when >= now: + if query.when >= now_seconds: next_scheduled = query break heappop(self._query_heap) @@ -357,9 +362,9 @@ def _process_ready_types(self) -> None: ready_types.add(query.type_) if ready_types: - self._browser.async_send_ready_queries(False, now, ready_types) + self._browser.async_send_ready_queries(False, now_millis, ready_types) - next_time = now + self._min_time_between_queries + next_time = now_seconds + self._min_time_between_queries if next_scheduled and next_scheduled.when > next_time: next_when = next_scheduled.when @@ -578,7 +583,9 @@ async def _async_start_query_sender(self) -> None: await self.zc.async_wait_for_start() self.query_scheduler.start(self._loop) - def async_send_ready_queries(self, first_request: bool, now: float_, ready_types: Set[str]) -> None: + def async_send_ready_queries( + self, first_request: bool, now_millis: float_, ready_types: Set[str] + ) -> None: """Send any ready queries.""" if self.done or self.zc.done: return @@ -587,7 +594,7 @@ def async_send_ready_queries(self, first_request: bool, now: float_, ready_types # just starting up and we know our cache is likely empty. This ensures # the next outgoing will be sent with the known answers list. question_type = DNSQuestionType.QU if not self.question_type and first_request else self.question_type - outs = generate_service_query(self.zc, now, ready_types, self.multicast, question_type) + outs = generate_service_query(self.zc, now_millis, ready_types, self.multicast, question_type) if outs: self._first_request = False for out in outs: diff --git a/src/zeroconf/_utils/time.pxd b/src/zeroconf/_utils/time.pxd index a9600286..fd51e39a 100644 --- a/src/zeroconf/_utils/time.pxd +++ b/src/zeroconf/_utils/time.pxd @@ -1,4 +1,4 @@ cpdef double current_time_millis() -cpdef millis_to_seconds(object millis) +cpdef double millis_to_seconds(double millis) diff --git a/src/zeroconf/_utils/time.py b/src/zeroconf/_utils/time.py index c6811585..600d9028 100644 --- a/src/zeroconf/_utils/time.py +++ b/src/zeroconf/_utils/time.py @@ -26,15 +26,17 @@ _float = float -def current_time_millis() -> float: +def current_time_millis() -> _float: """Current time in milliseconds. The current implemention uses `time.monotonic` but may change in the future. + + The design requires the time to match asyncio.loop.time() """ return time.monotonic() * 1000 -def millis_to_seconds(millis: _float) -> float: +def millis_to_seconds(millis: _float) -> _float: """Convert milliseconds to seconds.""" return millis / 1000.0 From 6ecea13bc67afaccc06a6ac035987f807eb902a3 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 10:21:19 -1000 Subject: [PATCH 08/79] fix: time --- src/zeroconf/_services/browser.pxd | 4 ++-- src/zeroconf/_services/browser.py | 29 ++++++++++++++--------------- src/zeroconf/_utils/time.pxd | 2 +- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 734fbaac..8c6e51ae 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -24,7 +24,7 @@ cdef class _ScheduledQuery: cdef public str name cdef public str type_ cdef public bint cancelled - cdef public float when + cdef public double when_millis cdef class _DNSPointerOutgoingBucket: @@ -50,7 +50,7 @@ cdef class QueryScheduler: cdef _ServiceBrowserBase _browser cdef tuple _first_random_delay_interval - cdef double _min_time_between_queries + cdef double _min_time_between_queries_millis cdef object _loop cdef unsigned int _startup_queries_sent cdef dict _scheduled diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2fde3575..0c140e04 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -101,28 +101,28 @@ class _ScheduledQuery: - __slots__ = ('name', 'type_', 'cancelled', 'when') + __slots__ = ('name', 'type_', 'cancelled', 'when_millis') - def __init__(self, name: str, type_: str, when: float) -> None: + def __init__(self, name: str, type_: str, when_millis: float) -> None: """Create a scheduled query.""" self.name = name self.type_ = type_ self.cancelled = False - self.when = when + self.when_millis = when_millis def __lt__(self, other: '_ScheduledQuery') -> bool: """Compare two scheduled queries.""" - return self.when < other.when + return self.when_millis < other.when_millis def __gt__(self, other: '_ScheduledQuery') -> bool: """Compare two scheduled queries.""" - return self.when > other.when + return self.when_millis > other.when_millis def __eq__(self, other: Any) -> bool: """Compare two scheduled queries.""" if not isinstance(other, _ScheduledQuery): return NotImplemented - return self.when == other.when + return self.when_millis == other.when_millis class _DNSPointerOutgoingBucket: @@ -262,7 +262,7 @@ class QueryScheduler: __slots__ = ( '_browser', '_first_random_delay_interval', - '_min_time_between_queries', + '_min_time_between_queries_millis', '_loop', '_startup_queries_sent', '_scheduled', @@ -278,7 +278,7 @@ def __init__( ) -> None: self._browser = browser self._first_random_delay_interval = first_random_delay_interval - self._min_time_between_queries = millis_to_seconds(delay) + self._min_time_between_queries_millis = delay self._loop: Optional[asyncio.AbstractEventLoop] = None self._startup_queries_sent = 0 self._scheduled: Dict[str, _ScheduledQuery] = {} @@ -326,7 +326,6 @@ def _process_ready_types(self) -> None: if TYPE_CHECKING: assert self._loop is not None now_millis = current_time_millis() - now_seconds = millis_to_seconds(now_millis) if self._startup_queries_sent < STARTUP_QUERIES: # At first we will send 3 queries to get the cache populated @@ -354,7 +353,7 @@ def _process_ready_types(self) -> None: while self._query_heap: query = self._query_heap[0] - if query.when >= now_seconds: + if query.when_millis >= now_millis: next_scheduled = query break heappop(self._query_heap) @@ -364,14 +363,14 @@ def _process_ready_types(self) -> None: if ready_types: self._browser.async_send_ready_queries(False, now_millis, ready_types) - next_time = now_seconds + self._min_time_between_queries + 
next_time_millis = now_millis + self._min_time_between_queries_millis - if next_scheduled and next_scheduled.when > next_time: - next_when = next_scheduled.when + if next_scheduled and next_scheduled.when_millis > next_time_millis: + next_when_millis = next_scheduled.when_millis else: - next_when = next_time + next_when_millis = next_time_millis - self._next_run = self._loop.call_at(next_when, self._process_ready_types) + self._next_run = self._loop.call_at(millis_to_seconds(next_when_millis), self._process_ready_types) class _ServiceBrowserBase(RecordUpdateListener): diff --git a/src/zeroconf/_utils/time.pxd b/src/zeroconf/_utils/time.pxd index fd51e39a..f6e70fe7 100644 --- a/src/zeroconf/_utils/time.pxd +++ b/src/zeroconf/_utils/time.pxd @@ -1,4 +1,4 @@ cpdef double current_time_millis() -cpdef double millis_to_seconds(double millis) +cpdef millis_to_seconds(double millis) From a758bccc159d9e2c79008cd18bf9f1656d8ee54b Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 10:44:09 -1000 Subject: [PATCH 09/79] fix: slow memory leak --- src/zeroconf/_services/browser.pxd | 2 +- src/zeroconf/_services/browser.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 8c6e51ae..b6c3c4bb 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -53,7 +53,7 @@ cdef class QueryScheduler: cdef double _min_time_between_queries_millis cdef object _loop cdef unsigned int _startup_queries_sent - cdef dict _scheduled + cdef dict _next_scheduled_for_name cdef list _query_heap cdef object _next_run diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 0c140e04..401be675 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -265,7 +265,7 @@ class QueryScheduler: '_min_time_between_queries_millis', '_loop', '_startup_queries_sent', - '_scheduled', + '_next_scheduled_for_name', '_query_heap', '_next_run', ) @@ -281,7 +281,7 @@ def __init__( self._min_time_between_queries_millis = delay self._loop: Optional[asyncio.AbstractEventLoop] = None self._startup_queries_sent = 0 - self._scheduled: Dict[str, _ScheduledQuery] = {} + self._next_scheduled_for_name: Dict[str, _ScheduledQuery] = {} self._query_heap: list[_ScheduledQuery] = [] self._next_run: Optional[asyncio.TimerHandle] = None @@ -309,12 +309,13 @@ def schedule(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) scheduled_query = _ScheduledQuery(pointer.alias, pointer.name, expire_time) - self._scheduled[pointer.name] = scheduled_query + self._next_scheduled_for_name[pointer.name] = scheduled_query heappush(self._query_heap, scheduled_query) def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" - self._scheduled[pointer.name].cancelled = True + self._next_scheduled_for_name[pointer.name].cancelled = True + del self._next_scheduled_for_name[pointer.name] def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" @@ -357,7 +358,7 @@ def _process_ready_types(self) -> None: next_scheduled = query break heappop(self._query_heap) - del self._scheduled[query.name] + del self._next_scheduled_for_name[query.name] ready_types.add(query.type_) if ready_types: From 430e678bbb280888b82646d1c7984073faab826b Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 11:30:49 -1000 Subject: [PATCH 10/79] fix: ensure all equality methods are implemented --- src/zeroconf/_services/browser.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 401be675..8f4bdee7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -114,9 +114,10 @@ def __lt__(self, other: '_ScheduledQuery') -> bool: """Compare two scheduled queries.""" return self.when_millis < other.when_millis - def __gt__(self, other: '_ScheduledQuery') -> bool: - """Compare two scheduled queries.""" - return self.when_millis > other.when_millis + def __le__(self, other: Any) -> bool: + if isinstance(other, _ScheduledQuery): + return self.when_millis < other.when_millis or self.__eq__(other) + return NotImplemented def __eq__(self, other: Any) -> bool: """Compare two scheduled queries.""" @@ -124,6 +125,15 @@ def __eq__(self, other: Any) -> bool: return NotImplemented return self.when_millis == other.when_millis + def __ge__(self, other: Any) -> bool: + if isinstance(other, _ScheduledQuery): + return self.when_millis > other.when_millis or self.__eq__(other) + return NotImplemented + + def __gt__(self, other: '_ScheduledQuery') -> bool: + """Compare two scheduled queries.""" + return self.when_millis > other.when_millis + class _DNSPointerOutgoingBucket: """A DNSOutgoing bucket.""" From 2370685d48f6359def16d0a27d86dc5b6b92fc09 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:31:07 -1000 Subject: [PATCH 11/79] fix: ensure all equality methods are implemented --- src/zeroconf/_services/browser.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 8f4bdee7..844379de 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -115,6 +115,7 @@ def __lt__(self, other: '_ScheduledQuery') -> bool: return self.when_millis < other.when_millis def __le__(self, other: Any) -> bool: + """Compare two scheduled queries.""" if isinstance(other, _ScheduledQuery): return self.when_millis < other.when_millis or self.__eq__(other) return NotImplemented @@ -126,6 +127,7 @@ def __eq__(self, other: Any) -> bool: return self.when_millis == other.when_millis def __ge__(self, other: Any) -> bool: + """Compare two scheduled queries.""" if isinstance(other, _ScheduledQuery): return self.when_millis > other.when_millis or self.__eq__(other) return NotImplemented From 946247a7c1c2675532efd09a8e60adc8ee1cdb00 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:31:57 -1000 Subject: [PATCH 12/79] fix: cleanup cancel method --- src/zeroconf/_services/browser.pxd | 1 + 1 file changed, 1 insertion(+) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index b6c3c4bb..0503b170 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -59,6 +59,7 @@ cdef class QueryScheduler: cpdef schedule(self, DNSPointer pointer) + @cython.locals(scheduled=_ScheduledQuery) cpdef cancel(self, DNSPointer pointer) @cython.locals(current=_ScheduledQuery) From 43abc64c9e1170d5ce3980e6d7a6a77704c97e91 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 11:32:03 -1000 Subject: [PATCH 13/79] fix: cleanup cancel method --- src/zeroconf/_services/browser.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 844379de..2c00c0c7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -326,8 +326,8 @@ def schedule(self, pointer: DNSPointer) -> None: def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" - self._next_scheduled_for_name[pointer.name].cancelled = True - del self._next_scheduled_for_name[pointer.name] + scheduled = self._next_scheduled_for_name.pop(pointer.name) + scheduled.cancelled = True def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" From 740c8c75e73ed6447a6adff8c5d53da6629a4b46 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:36:57 -1000 Subject: [PATCH 14/79] feat: split schedule methods --- src/zeroconf/_services/browser.pxd | 2 ++ src/zeroconf/_services/browser.py | 32 ++++++++++++++++++++---------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 0503b170..8496d30e 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -65,6 +65,8 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledQuery) cpdef reschedule(self, DNSPointer pointer) + cpdef _process_startup_queries(self) + @cython.locals(query=_ScheduledQuery, next_scheduled=_ScheduledQuery, next_when=double) cpdef _process_ready_types(self) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2c00c0c7..a289f85a 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -309,7 +309,7 @@ def start(self, loop: asyncio.AbstractEventLoop) -> None: """ start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) self._loop = loop - loop.call_later(start_delay, self._process_ready_types) + self._next_run = loop.call_later(start_delay, self._process_startup_queries) def stop(self) -> None: """Stop the scheduler.""" @@ -334,22 +334,32 @@ def reschedule(self, pointer: DNSPointer) -> None: self.cancel(pointer) self.schedule(pointer) - def _process_ready_types(self) -> None: - """Generate a list of ready types that is due and schedule the next time.""" + def _process_startup_queries(self) -> None: if TYPE_CHECKING: assert self._loop is not None now_millis = current_time_millis() - if self._startup_queries_sent < STARTUP_QUERIES: - # At first we will send 3 queries to get the cache populated - first_request = self._startup_queries_sent == 0 - self._browser.async_send_ready_queries(first_request, now_millis, self._browser.types) - self._startup_queries_sent += 1 - self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_ready_types) + # At first we will send 3 queries to get the cache populated + self._browser.async_send_ready_queries( + self._startup_queries_sent == 0, now_millis, self._browser.types + ) + self._startup_queries_sent += 1 + + # Once we finish sending the initial queries we will + # switch to a strategy of sending queries only when we + # need to refresh records that are about to expire + if self._startup_queries_sent >= STARTUP_QUERIES: + self._process_ready_types() return - # Once we have sent the initial queries we will send queries - # only when we need to 
refresh records that are about to expire (aka + self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_startup_queries) + + def _process_ready_types(self) -> None: + """Generate a list of ready types that is due and schedule the next time.""" + if TYPE_CHECKING: + assert self._loop is not None + now_millis = current_time_millis() + # Refresh records that are about to expire (aka # _EXPIRE_REFRESH_TIME_PERCENT which is currently 75% of the TTL) and # with a minimum time between queries of _min_time_between_queries # which defaults to 1s From b028a6daf89efd21088694b310a78ed03ab71c9b Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:38:44 -1000 Subject: [PATCH 15/79] fix: typing error --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 8496d30e..ff405f37 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -99,4 +99,4 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef async_update_records_complete(self) - cpdef async_send_ready_queries(self, first_request: bint, double now_millis, set ready_types) + cpdef async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) From eddf279afcf1e4d3eac42f1d483bc1cd568d3c71 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:43:43 -1000 Subject: [PATCH 16/79] fix: always ask QU when populating the cache --- src/zeroconf/_services/browser.pxd | 7 ++++--- src/zeroconf/_services/browser.py | 8 +++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index ff405f37..b9508d9a 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -16,6 +16,7 @@ cdef cython.uint _EXPIRE_REFRESH_TIME_PERCENT, _MAX_MSG_TYPICAL, _DNS_PACKET_HEA cdef cython.uint _TYPE_PTR cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES +cdef cython.uint STARTUP_QUERIES cdef object heappop, heappush @@ -35,7 +36,7 @@ cdef class _DNSPointerOutgoingBucket: cpdef add(self, cython.uint max_compressed_size, DNSQuestion question, cython.set answers) @cython.locals(cache=DNSCache, question_history=QuestionHistory, record=DNSRecord, qu_question=bint) -cpdef generate_service_query( +cpdef list generate_service_query( object zc, double now_millis, set types_, @@ -78,13 +79,13 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cdef object _loop cdef public object addr cdef public object port - cdef public object multicast + cdef public bint multicast cdef public object question_type cdef public cython.dict _pending_handlers cdef public object _service_state_changed cdef public QueryScheduler query_scheduler cdef public bint done - cdef public object _first_request + cdef public bint _first_request cdef public object _next_send_timer cdef public object _query_sender_task diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index a289f85a..ef7f3ac3 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -340,9 +340,7 @@ def _process_startup_queries(self) -> None: now_millis = current_time_millis() # At first we will send 3 queries to get the cache populated - self._browser.async_send_ready_queries( - self._startup_queries_sent == 0, now_millis, 
self._browser.types - ) + self._browser.async_send_ready_queries(True, now_millis, self._browser.types) self._startup_queries_sent += 1 # Once we finish sending the initial queries we will @@ -388,7 +386,7 @@ def _process_ready_types(self) -> None: next_time_millis = now_millis + self._min_time_between_queries_millis - if next_scheduled and next_scheduled.when_millis > next_time_millis: + if next_scheduled is not None and next_scheduled.when_millis > next_time_millis: next_when_millis = next_scheduled.when_millis else: next_when_millis = next_time_millis @@ -620,7 +618,7 @@ def async_send_ready_queries( if outs: self._first_request = False for out in outs: - self.zc.async_send(out, addr=self.addr, port=self.port) + self.zc.async_send(out, self.addr, self.port) class ServiceBrowser(_ServiceBrowserBase, threading.Thread): From fc62b4b4242df951c41d8249e2aba948c6a39926 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 11:47:04 -1000 Subject: [PATCH 17/79] fix: tweaks --- src/zeroconf/_services/browser.pxd | 3 +++ src/zeroconf/_services/browser.py | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index b9508d9a..311b4227 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -14,10 +14,13 @@ cdef bint TYPE_CHECKING cdef object cached_possible_types cdef cython.uint _EXPIRE_REFRESH_TIME_PERCENT, _MAX_MSG_TYPICAL, _DNS_PACKET_HEADER_LEN cdef cython.uint _TYPE_PTR +cdef object _CLASS_IN cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES cdef cython.uint STARTUP_QUERIES +cdef object QU_QUESTION + cdef object heappop, heappush cdef class _ScheduledQuery: diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index ef7f3ac3..fafead69 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -83,6 +83,8 @@ SERVICE_STATE_CHANGE_REMOVED = ServiceStateChange.Removed SERVICE_STATE_CHANGE_UPDATED = ServiceStateChange.Updated +QU_QUESTION = DNSQuestionType.QU + STARTUP_QUERIES = 3 if TYPE_CHECKING: @@ -216,7 +218,7 @@ def generate_service_query( ) -> List[DNSOutgoing]: """Generate a service query for sending with zeroconf.send.""" questions_with_known_answers: _QuestionWithKnownAnswers = {} - qu_question = not multicast if question_type is None else question_type == DNSQuestionType.QU + qu_question = not multicast if question_type is None else question_type is QU_QUESTION question_history = zc.question_history cache = zc.cache for type_ in types_: @@ -613,7 +615,7 @@ def async_send_ready_queries( # https://datatracker.ietf.org/doc/html/rfc6762#section-5.4 since we are # just starting up and we know our cache is likely empty. This ensures # the next outgoing will be sent with the known answers list. - question_type = DNSQuestionType.QU if not self.question_type and first_request else self.question_type + question_type = QU_QUESTION if self.question_type is None and first_request else self.question_type outs = generate_service_query(self.zc, now_millis, ready_types, self.multicast, question_type) if outs: self._first_request = False From a695b05749b3e2c83592a85b9f91e4c51713da0b Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 11:50:16 -1000 Subject: [PATCH 18/79] fix: tweaks --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index fafead69..7b38360f 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -315,7 +315,7 @@ def start(self, loop: asyncio.AbstractEventLoop) -> None: def stop(self) -> None: """Stop the scheduler.""" - if self._next_run: + if self._next_run is not None: self._next_run.cancel() self._next_run = None From 150d146d906f677606d7f156f01df6bf1fa2bff4 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:03:02 -1000 Subject: [PATCH 19/79] fix: can be cancelled when we get a goodbye first --- src/zeroconf/_services/browser.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 7b38360f..1e282dce 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -328,8 +328,9 @@ def schedule(self, pointer: DNSPointer) -> None: def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" - scheduled = self._next_scheduled_for_name.pop(pointer.name) - scheduled.cancelled = True + scheduled = self._next_scheduled_for_name.pop(pointer.name, None) + if scheduled: + scheduled.cancelled = True def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" From 36550aaf159198f830a1e5b1305fd21bb4505826 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:08:02 -1000 Subject: [PATCH 20/79] fix: tweaks --- src/zeroconf/_services/browser.pxd | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 311b4227..d9172846 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -19,6 +19,8 @@ cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_ST cdef cython.set _ADDRESS_RECORD_TYPES cdef cython.uint STARTUP_QUERIES +cdef object _MDNS_PORT, _BROWSER_TIME + cdef object QU_QUESTION cdef object heappop, heappush From 644c4bb05dc9b66acef2d356c539a74190374f4a Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 12:15:39 -1000 Subject: [PATCH 21/79] fix: types --- src/zeroconf/_protocol/outgoing.pxd | 6 +++--- src/zeroconf/_protocol/outgoing.py | 4 ++-- src/zeroconf/_services/browser.pxd | 2 ++ src/zeroconf/_services/browser.py | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/zeroconf/_protocol/outgoing.pxd b/src/zeroconf/_protocol/outgoing.pxd index 3460f0c7..4353757a 100644 --- a/src/zeroconf/_protocol/outgoing.pxd +++ b/src/zeroconf/_protocol/outgoing.pxd @@ -127,16 +127,16 @@ cdef class DNSOutgoing: ) cpdef packets(self) - cpdef add_question_or_all_cache(self, DNSCache cache, object now, str name, object type_, object class_) + cpdef add_question_or_all_cache(self, DNSCache cache, double now, str name, object type_, object class_) - cpdef add_question_or_one_cache(self, DNSCache cache, object now, str name, object type_, object class_) + cpdef add_question_or_one_cache(self, DNSCache cache, double now, str name, object type_, object class_) cpdef add_question(self, DNSQuestion question) cpdef add_answer(self, DNSIncoming inp, DNSRecord record) @cython.locals(now_double=double) - cpdef add_answer_at_time(self, DNSRecord record, object now) + cpdef add_answer_at_time(self, DNSRecord record, double now) cpdef add_authorative_answer(self, DNSPointer record) diff --git a/src/zeroconf/_protocol/outgoing.py b/src/zeroconf/_protocol/outgoing.py index e94cd0d2..57f98169 100644 --- a/src/zeroconf/_protocol/outgoing.py +++ b/src/zeroconf/_protocol/outgoing.py @@ -148,9 +148,9 @@ def add_question(self, record: DNSQuestion) -> None: def add_answer(self, inp: DNSIncoming, record: DNSRecord) -> None: """Adds an answer""" if not record.suppressed_by(inp): - self.add_answer_at_time(record, 0) + self.add_answer_at_time(record, 0.0) - def add_answer_at_time(self, record: Optional[DNSRecord], now: Union[float, int]) -> None: + def add_answer_at_time(self, record: Optional[DNSRecord], now: float_) -> None: """Adds an answer if it does not expire by a certain time""" now_double = now if record is not None and (now_double == 0 or not record.is_expired(now_double)): diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index d9172846..19197922 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -23,6 +23,8 @@ cdef object _MDNS_PORT, _BROWSER_TIME cdef object QU_QUESTION +cdef object _FLAGS_QR_QUERY + cdef object heappop, heappush cdef class _ScheduledQuery: diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 1e282dce..3ac23249 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -147,7 +147,7 @@ class _DNSPointerOutgoingBucket: def __init__(self, now_millis: float, multicast: bool) -> None: """Create a bucket to wrap a DNSOutgoing.""" self.now_millis = now_millis - self.out = DNSOutgoing(_FLAGS_QR_QUERY, multicast=multicast) + self.out = DNSOutgoing(_FLAGS_QR_QUERY, multicast) self.bytes = 0 def add(self, max_compressed_size: int_, question: DNSQuestion, answers: Set[DNSPointer]) -> None: From 23822662911b7c4e80d66eef922eb4f42b171319 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 12:16:58 -1000 Subject: [PATCH 22/79] fix: types --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 19197922..a0eab4bd 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -52,7 +52,7 @@ cpdef list generate_service_query( ) @cython.locals(answer=DNSPointer, query_buckets=list, question=DNSQuestion, max_compressed_size=cython.uint, max_bucket_size=cython.uint, query_bucket=_DNSPointerOutgoingBucket) -cdef _group_ptr_queries_with_known_answers(double now_millis, object multicast, cython.dict question_with_known_answers) +cdef list _group_ptr_queries_with_known_answers(double now_millis, object multicast, cython.dict question_with_known_answers) cdef class QueryScheduler: From b3aae507fb9b8c75731eaa29299cba8727b44302 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:17:45 -1000 Subject: [PATCH 23/79] fix: types --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index a0eab4bd..650dd3fa 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -52,7 +52,7 @@ cpdef list generate_service_query( ) @cython.locals(answer=DNSPointer, query_buckets=list, question=DNSQuestion, max_compressed_size=cython.uint, max_bucket_size=cython.uint, query_bucket=_DNSPointerOutgoingBucket) -cdef list _group_ptr_queries_with_known_answers(double now_millis, object multicast, cython.dict question_with_known_answers) +cdef list _group_ptr_queries_with_known_answers(double now_millis, bint multicast, cython.dict question_with_known_answers) cdef class QueryScheduler: From f33ab3306341f2e93cd074ff365dc1a3d215059d Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:19:43 -1000 Subject: [PATCH 24/79] fix: types --- src/zeroconf/_services/info.pxd | 2 +- src/zeroconf/_services/info.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/info.pxd b/src/zeroconf/_services/info.pxd index c53342cb..ecc2a534 100644 --- a/src/zeroconf/_services/info.pxd +++ b/src/zeroconf/_services/info.pxd @@ -124,4 +124,4 @@ cdef class ServiceInfo(RecordUpdateListener): cpdef async_clear_cache(self) @cython.locals(cache=DNSCache) - cdef _generate_request_query(self, object zc, object now, object question_type) + cdef _generate_request_query(self, object zc, double now, object question_type) diff --git a/src/zeroconf/_services/info.py b/src/zeroconf/_services/info.py index 962e76bf..3a27e10a 100644 --- a/src/zeroconf/_services/info.py +++ b/src/zeroconf/_services/info.py @@ -845,7 +845,7 @@ def _generate_request_query( out.add_question_or_one_cache(cache, now, name, _TYPE_TXT, _CLASS_IN) out.add_question_or_all_cache(cache, now, server_or_name, _TYPE_A, _CLASS_IN) out.add_question_or_all_cache(cache, now, server_or_name, _TYPE_AAAA, _CLASS_IN) - if question_type == DNS_QUESTION_TYPE_QU: + if question_type is DNS_QUESTION_TYPE_QU: for question in out.questions: question.unicast = True return out From 48347427d990ac72be5669a0b90742811eb7ac73 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 12:29:35 -1000 Subject: [PATCH 25/79] fix: debug --- src/zeroconf/_services/browser.py | 29 +++++++++++++++++++++++++++-- src/zeroconf/const.py | 2 +- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 3ac23249..9f5242f3 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -334,7 +334,32 @@ def cancel(self, pointer: DNSPointer) -> None: def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" - self.cancel(pointer) + current = self._next_scheduled_for_name.get(pointer.name) + if current is not None: + expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) + # If the expire time is within self._min_time_between_queries_millis + # of the current scheduled time avoid churn by not rescheduling + import pprint + + pprint.pprint( + [ + 'Check if we should reschedule', + current.when_millis, + expire_time, + current.when_millis - expire_time, + ] + ) + if ( + -self._min_time_between_queries_millis + <= expire_time - current.when_millis + <= self._min_time_between_queries_millis + ): + pprint.pprint( + ['No reschedule', current.when_millis, expire_time, current.when_millis - expire_time] + ) + return + pprint.pprint(['Reschedule', current.when_millis, expire_time, current.when_millis - expire_time]) + current.cancelled = True self.schedule(pointer) def _process_startup_queries(self) -> None: @@ -363,7 +388,7 @@ def _process_ready_types(self) -> None: # Refresh records that are about to expire (aka # _EXPIRE_REFRESH_TIME_PERCENT which is currently 75% of the TTL) and # with a minimum time between queries of _min_time_between_queries - # which defaults to 1s + # which defaults to 10s # Remove cancelled from head of queue. while self._query_heap: diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index 47bd6a33..4e277fc0 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -29,7 +29,7 @@ _CHECK_TIME = 175 # ms _REGISTER_TIME = 225 # ms _LISTENER_TIME = 200 # ms -_BROWSER_TIME = 1000 # ms +_BROWSER_TIME = 10000 # ms _DUPLICATE_QUESTION_INTERVAL = _BROWSER_TIME - 1 # ms _DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 _CACHE_CLEANUP_INTERVAL = 10 # s From cae5d4e9596f6476367a6ea82600c2470109348e Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 12:32:40 -1000 Subject: [PATCH 26/79] fix: reduce churn --- src/zeroconf/_services/browser.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 9f5242f3..c478392c 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -339,26 +339,12 @@ def reschedule(self, pointer: DNSPointer) -> None: expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) # If the expire time is within self._min_time_between_queries_millis # of the current scheduled time avoid churn by not rescheduling - import pprint - - pprint.pprint( - [ - 'Check if we should reschedule', - current.when_millis, - expire_time, - current.when_millis - expire_time, - ] - ) if ( -self._min_time_between_queries_millis <= expire_time - current.when_millis <= self._min_time_between_queries_millis ): - pprint.pprint( - ['No reschedule', current.when_millis, expire_time, current.when_millis - expire_time] - ) return - pprint.pprint(['Reschedule', current.when_millis, expire_time, current.when_millis - expire_time]) current.cancelled = True self.schedule(pointer) From a81c2e609b81905e0ae73d21f29d6a82a044787a Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:43:08 -1000 Subject: [PATCH 27/79] fix: reduce churn --- src/zeroconf/_services/browser.pxd | 14 ++++++++------ src/zeroconf/_services/browser.py | 9 ++++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 650dd3fa..9c8a6a92 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -65,18 +65,20 @@ cdef class QueryScheduler: cdef list _query_heap cdef object _next_run - cpdef schedule(self, DNSPointer pointer) + cpdef void schedule(self, DNSPointer pointer) + + cdef void _schedule(self, DNSPointer pointer, double expire_time) @cython.locals(scheduled=_ScheduledQuery) - cpdef cancel(self, DNSPointer pointer) + cpdef void cancel(self, DNSPointer pointer) - @cython.locals(current=_ScheduledQuery) - cpdef reschedule(self, DNSPointer pointer) + @cython.locals(current=_ScheduledQuery, expire_time=double) + cpdef void reschedule(self, DNSPointer pointer) - cpdef _process_startup_queries(self) + cpdef void _process_startup_queries(self) @cython.locals(query=_ScheduledQuery, next_scheduled=_ScheduledQuery, next_when=double) - cpdef _process_ready_types(self) + cpdef void _process_ready_types(self) cdef class _ServiceBrowserBase(RecordUpdateListener): diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index c478392c..3a355ff7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -321,7 +321,10 @@ def stop(self) -> None: def schedule(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" - expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) + self._schedule(pointer, pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT)) + + def _schedule(self, pointer: DNSPointer, expire_time: float_) -> None: + """Schedule a query for a pointer.""" scheduled_query = _ScheduledQuery(pointer.alias, pointer.name, expire_time) self._next_scheduled_for_name[pointer.name] = scheduled_query heappush(self._query_heap, scheduled_query) @@ -335,8 +338,8 @@ def cancel(self, pointer: DNSPointer) -> None: def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" 
current = self._next_scheduled_for_name.get(pointer.name) + expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) if current is not None: - expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) # If the expire time is within self._min_time_between_queries_millis # of the current scheduled time avoid churn by not rescheduling if ( @@ -346,7 +349,7 @@ def reschedule(self, pointer: DNSPointer) -> None: ): return current.cancelled = True - self.schedule(pointer) + self._schedule(pointer, expire_time) def _process_startup_queries(self) -> None: if TYPE_CHECKING: From 3213a7f938d2e7eff3c913c5834a3a0bd5c66b84 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:48:38 -1000 Subject: [PATCH 28/79] fix: more efficient --- src/zeroconf/_services/browser.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 3a355ff7..55c188f7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -379,18 +379,14 @@ def _process_ready_types(self) -> None: # with a minimum time between queries of _min_time_between_queries # which defaults to 10s - # Remove cancelled from head of queue. - while self._query_heap: - query = self._query_heap[0] - if not query.cancelled: - break - heappop(self._query_heap) - ready_types: Set[str] = set() next_scheduled: Optional[_ScheduledQuery] = None while self._query_heap: query = self._query_heap[0] + if query.cancelled: + heappop(self._query_heap) + continue if query.when_millis >= now_millis: next_scheduled = query break From 20afec35a1f881ee3fddf2a6060206d5be6c14ec Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 12:52:42 -1000 Subject: [PATCH 29/79] fix: account for clock resolution so we are not late --- src/zeroconf/_services/browser.pxd | 1 + src/zeroconf/_services/browser.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 9c8a6a92..47569dc6 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -64,6 +64,7 @@ cdef class QueryScheduler: cdef dict _next_scheduled_for_name cdef list _query_heap cdef object _next_run + cdef double _clock_resolution_millis cpdef void schedule(self, DNSPointer pointer) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 55c188f7..13cadf86 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -25,6 +25,7 @@ import queue import random import threading +import time import warnings from functools import partial from types import TracebackType # noqa # used in type hints @@ -282,6 +283,7 @@ class QueryScheduler: '_next_scheduled_for_name', '_query_heap', '_next_run', + '_clock_resolution_millis', ) def __init__( @@ -298,6 +300,7 @@ def __init__( self._next_scheduled_for_name: Dict[str, _ScheduledQuery] = {} self._query_heap: list[_ScheduledQuery] = [] self._next_run: Optional[asyncio.TimerHandle] = None + self._clock_resolution_millis = time.get_clock_info('monotonic').resolution * 1000 def start(self, loop: asyncio.AbstractEventLoop) -> None: """Start the scheduler. 
@@ -381,13 +384,14 @@ def _process_ready_types(self) -> None: ready_types: Set[str] = set() next_scheduled: Optional[_ScheduledQuery] = None + end_time_millis = now_millis + self._clock_resolution_millis while self._query_heap: query = self._query_heap[0] if query.cancelled: heappop(self._query_heap) continue - if query.when_millis >= now_millis: + if query.when_millis > end_time_millis: next_scheduled = query break heappop(self._query_heap) From 9799615168f47fb7b18554d7f853593077f164d3 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 13:06:46 -1000 Subject: [PATCH 30/79] fix: const --- src/zeroconf/const.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index 4e277fc0..ba00c19c 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -30,8 +30,8 @@ _REGISTER_TIME = 225 # ms _LISTENER_TIME = 200 # ms _BROWSER_TIME = 10000 # ms -_DUPLICATE_QUESTION_INTERVAL = _BROWSER_TIME - 1 # ms _DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 +_DUPLICATE_QUESTION_INTERVAL = _DUPLICATE_PACKET_SUPPRESSION_INTERVAL - 1 # ms _CACHE_CLEANUP_INTERVAL = 10 # s _LOADED_SYSTEM_TIMEOUT = 10 # s _STARTUP_TIMEOUT = 9 # s must be lower than _LOADED_SYSTEM_TIMEOUT From a3f5b84e18a1f5e370b5d3be8ae670ec1b14432f Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 13:11:26 -1000 Subject: [PATCH 31/79] fix: tweak startup queries for bad wifi --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 13cadf86..2c3f9516 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -86,7 +86,7 @@ QU_QUESTION = DNSQuestionType.QU -STARTUP_QUERIES = 3 +STARTUP_QUERIES = 5 if TYPE_CHECKING: from .._core import Zeroconf From 04b1021b4fe2c9a5e92540ac2a9c9ac732dee237 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 14:08:39 -1000 Subject: [PATCH 32/79] fixup --- src/zeroconf/const.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index ba00c19c..f39e1313 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -30,7 +30,7 @@ _REGISTER_TIME = 225 # ms _LISTENER_TIME = 200 # ms _BROWSER_TIME = 10000 # ms -_DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 +_DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 # ms _DUPLICATE_QUESTION_INTERVAL = _DUPLICATE_PACKET_SUPPRESSION_INTERVAL - 1 # ms _CACHE_CLEANUP_INTERVAL = 10 # s _LOADED_SYSTEM_TIMEOUT = 10 # s From 037d7d21f2ec36d9061af1e41b362b8548b83e80 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 14:10:58 -1000 Subject: [PATCH 33/79] fix: cannot be patched or its late --- src/zeroconf/const.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index f39e1313..aa64306e 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -31,7 +31,7 @@ _LISTENER_TIME = 200 # ms _BROWSER_TIME = 10000 # ms _DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000 # ms -_DUPLICATE_QUESTION_INTERVAL = _DUPLICATE_PACKET_SUPPRESSION_INTERVAL - 1 # ms +_DUPLICATE_QUESTION_INTERVAL = 999 # ms # Must be 1ms less than _DUPLICATE_PACKET_SUPPRESSION_INTERVAL _CACHE_CLEANUP_INTERVAL = 10 # s _LOADED_SYSTEM_TIMEOUT = 10 # s _STARTUP_TIMEOUT = 9 # s must be lower than _LOADED_SYSTEM_TIMEOUT From 6d31787a9df25ea7758c66e1b2fc21a27a04ea9d Mon Sep 17 00:00:00 2001 From: "J. 
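Patches 28 and 29 together change how the heap is drained: cancelled entries are discarded lazily when they surface at the top, and anything within one clock resolution of `now` counts as due so a timer that fires a hair early is not pushed out by a whole extra interval. A condensed standalone sketch; `ScheduledQuery` here is a simplified stand-in for the real class:

import heapq
import time
from dataclasses import dataclass, field
from typing import List, Optional, Set, Tuple

_CLOCK_RESOLUTION_MILLIS = time.get_clock_info("monotonic").resolution * 1000


@dataclass(order=True)
class ScheduledQuery:
    when_millis: float
    name: str = field(compare=False)
    cancelled: bool = field(default=False, compare=False)


def pop_due_queries(
    heap: List[ScheduledQuery], now_millis: float
) -> Tuple[Set[str], Optional[ScheduledQuery]]:
    """Pop every due, non-cancelled entry.

    Returns the set of ready names and the next entry still scheduled, if
    any.  Cancelled entries stay in the heap until they reach the top and
    are dropped here, and entries within one clock resolution of ``now``
    are treated as already due.
    """
    ready: Set[str] = set()
    end_time_millis = now_millis + _CLOCK_RESOLUTION_MILLIS
    while heap:
        query = heap[0]
        if query.cancelled:
            heapq.heappop(heap)
            continue
        if query.when_millis > end_time_millis:
            return ready, query
        heapq.heappop(heap)
        ready.add(query.name)
    return ready, None


heap: List[ScheduledQuery] = []
heapq.heappush(heap, ScheduledQuery(5_000.0, "_hap._tcp.local."))
heapq.heappush(heap, ScheduledQuery(1_000.0, "_http._tcp.local."))
ready, nxt = pop_due_queries(heap, now_millis=1_000.0)
assert ready == {"_http._tcp.local."}
assert nxt is not None and nxt.name == "_hap._tcp.local."
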
Nick Koston" Date: Thu, 14 Dec 2023 14:14:52 -1000 Subject: [PATCH 34/79] fix: adjust tests --- tests/services/test_browser.py | 8 ++++---- tests/test_asyncio.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index a658ded9..bc9e2b3e 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -517,8 +517,8 @@ def _async_send_ready_queries_schedule_next(self): # patch the zeroconf current_time_millis # patch the backoff limit to prevent test running forever with patch.object(zeroconf_browser, "async_send", send), patch.object( - _services_browser, "_BROWSER_BACKOFF_LIMIT", 10 - ), patch.object(_services_browser, "_FIRST_QUERY_DELAY_RANDOM_INTERVAL", (0, 0)): + _services_browser, "_FIRST_QUERY_DELAY_RANDOM_INTERVAL", (0, 0) + ): # dummy service callback def on_service_state_change(zeroconf, service_type, state_change, name): pass @@ -536,7 +536,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): if time_offset == expected_query_time: assert got_query.is_set() got_query.clear() - if next_query_interval == _services_browser._BROWSER_BACKOFF_LIMIT: + if next_query_interval == 2**_services_browser.STARTUP_QUERIES: # Only need to test up to the point where we've seen a query # after the backoff limit has been hit break @@ -545,7 +545,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): expected_query_time = initial_query_interval else: next_query_interval = min( - 2 * next_query_interval, _services_browser._BROWSER_BACKOFF_LIMIT + 2 * next_query_interval, 2**_services_browser.STARTUP_QUERIES ) expected_query_time += next_query_interval else: diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 53bce4b4..efbd9ae0 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1027,7 +1027,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): # by asking the same question over and over with patch.object(zeroconf_browser, "async_send", send), patch( "zeroconf._services.browser.current_time_millis", _new_current_time_millis - ), patch.object(_services_browser, "_BROWSER_BACKOFF_LIMIT", int(expected_ttl / 4)): + ): service_added = asyncio.Event() service_removed = asyncio.Event() From 5a789e27c8ddc0666dde23e7ec3c4313aa1ba3e7 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 14:18:50 -1000 Subject: [PATCH 35/79] chore: add time changed helper --- tests/__init__.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 98cd901c..802310e8 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -19,9 +19,9 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ - import asyncio import socket +import time from functools import lru_cache from typing import List, Set @@ -30,6 +30,8 @@ from zeroconf import DNSIncoming, DNSQuestion, DNSRecord, Zeroconf from zeroconf._history import QuestionHistory +_MONOTONIC_RESOLUTION = time.get_clock_info("monotonic").resolution + class QuestionHistoryWithoutSuppression(QuestionHistory): def suppresses(self, question: DNSQuestion, now: float, known_answers: Set[DNSRecord]) -> bool: @@ -84,3 +86,25 @@ def has_working_ipv6(): def _clear_cache(zc: Zeroconf) -> None: zc.cache.cache.clear() zc.question_history.clear() + + +def time_changed_millis(millis: float | None = None) -> None: + """Call all scheduled events for a time.""" + loop = asyncio.get_running_loop() + loop_time = loop.time() + if millis is not None: + mock_seconds_into_future = millis / 1000 + else: + mock_seconds_into_future = loop_time + + for task in list(loop._scheduled): # type: ignore[attr-defined] + if not isinstance(task, asyncio.TimerHandle): + continue + if task.cancelled(): + continue + + future_seconds = task.when() - (loop_time + _MONOTONIC_RESOLUTION) + + if mock_seconds_into_future >= future_seconds: + task._run() + task.cancel() From 31d1b66e5727373e3959dfb94589effab13e5edd Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:18:31 -1000 Subject: [PATCH 36/79] fix: names and test --- src/zeroconf/_services/browser.pxd | 5 +- src/zeroconf/_services/browser.py | 12 ++- tests/__init__.py | 21 +++-- tests/test_asyncio.py | 138 +++++++++-------------------- 4 files changed, 64 insertions(+), 112 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 47569dc6..4b444d99 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -17,7 +17,6 @@ cdef cython.uint _TYPE_PTR cdef object _CLASS_IN cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES -cdef cython.uint STARTUP_QUERIES cdef object _MDNS_PORT, _BROWSER_TIME @@ -61,8 +60,8 @@ cdef class QueryScheduler: cdef double _min_time_between_queries_millis cdef object _loop cdef unsigned int _startup_queries_sent - cdef dict _next_scheduled_for_name - cdef list _query_heap + cdef public dict _next_scheduled_for_name + cdef public list _query_heap cdef object _next_run cdef double _clock_resolution_millis diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2c3f9516..3cb08c6f 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -113,6 +113,10 @@ def __init__(self, name: str, type_: str, when_millis: float) -> None: self.cancelled = False self.when_millis = when_millis + def __repr__(self) -> str: + """Return a string representation of the scheduled query.""" + return f"<{self.__class__.__name__} {self.name} {self.type_} cancelled={self.cancelled} when={self.when_millis}>" + def __lt__(self, other: '_ScheduledQuery') -> bool: """Compare two scheduled queries.""" return self.when_millis < other.when_millis @@ -329,18 +333,18 
@@ def schedule(self, pointer: DNSPointer) -> None: def _schedule(self, pointer: DNSPointer, expire_time: float_) -> None: """Schedule a query for a pointer.""" scheduled_query = _ScheduledQuery(pointer.alias, pointer.name, expire_time) - self._next_scheduled_for_name[pointer.name] = scheduled_query + self._next_scheduled_for_name[pointer.alias] = scheduled_query heappush(self._query_heap, scheduled_query) def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" - scheduled = self._next_scheduled_for_name.pop(pointer.name, None) + scheduled = self._next_scheduled_for_name.pop(pointer.alias, None) if scheduled: scheduled.cancelled = True def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" - current = self._next_scheduled_for_name.get(pointer.name) + current = self._next_scheduled_for_name.get(pointer.alias) expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) if current is not None: # If the expire time is within self._min_time_between_queries_millis @@ -394,7 +398,7 @@ def _process_ready_types(self) -> None: if query.when_millis > end_time_millis: next_scheduled = query break - heappop(self._query_heap) + query = heappop(self._query_heap) del self._next_scheduled_for_name[query.name] ready_types.add(query.type_) diff --git a/tests/__init__.py b/tests/__init__.py index 802310e8..b93be761 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -24,6 +24,7 @@ import time from functools import lru_cache from typing import List, Set +from unittest import mock import ifaddr @@ -97,14 +98,16 @@ def time_changed_millis(millis: float | None = None) -> None: else: mock_seconds_into_future = loop_time - for task in list(loop._scheduled): # type: ignore[attr-defined] - if not isinstance(task, asyncio.TimerHandle): - continue - if task.cancelled(): - continue + with mock.patch("time.monotonic", return_value=mock_seconds_into_future): - future_seconds = task.when() - (loop_time + _MONOTONIC_RESOLUTION) + for task in list(loop._scheduled): # type: ignore[attr-defined] + if not isinstance(task, asyncio.TimerHandle): + continue + if task.cancelled(): + continue - if mock_seconds_into_future >= future_seconds: - task._run() - task.cancel() + future_seconds = task.when() - (loop_time + _MONOTONIC_RESOLUTION) + + if mock_seconds_into_future >= future_seconds: + task._run() + task.cancel() diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index efbd9ae0..00400488 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -8,7 +8,6 @@ import os import socket import threading -import time from typing import cast from unittest.mock import ANY, call, patch @@ -44,7 +43,12 @@ ) from zeroconf.const import _LISTENER_TIME -from . import QuestionHistoryWithoutSuppression, _clear_cache, has_working_ipv6 +from . 
import ( + QuestionHistoryWithoutSuppression, + _clear_cache, + has_working_ipv6, + time_changed_millis, +) log = logging.getLogger('zeroconf') original_logging_level = logging.NOTSET @@ -991,20 +995,21 @@ def on_service_state_change(zeroconf, service_type, state_change, name): # we are going to patch the zeroconf send to check packet sizes old_send = zeroconf_browser.async_send - time_offset = 0.0 - - def _new_current_time_millis(): - """Current system time in milliseconds""" - return (time.monotonic() * 1000) + (time_offset * 1000) - - expected_ttl = const._DNS_HOST_TTL + expected_ttl = const._DNS_OTHER_TTL nbr_answers = 0 + answers = [] def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" pout = DNSIncoming(out.packets()[0]) + import pprint + + pprint.pprint(pout) + last_answers = pout.answers() + answers.append(last_answers) + nonlocal nbr_answers - for answer in pout.answers(): + for answer in last_answers: nbr_answers += 1 if not answer.ttl > expected_ttl / 2: unexpected_ttl.set() @@ -1025,9 +1030,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): # patch the backoff limit to ensure we always get one query every 1/4 of the DNS TTL # Disable duplicate question suppression and duplicate packet suppression for this test as it works # by asking the same question over and over - with patch.object(zeroconf_browser, "async_send", send), patch( - "zeroconf._services.browser.current_time_millis", _new_current_time_millis - ): + with patch.object(zeroconf_browser, "async_send", send): service_added = asyncio.Event() service_removed = asyncio.Event() @@ -1039,30 +1042,37 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): ) task = await aio_zeroconf_registrar.async_register_service(info) await task - + loop = asyncio.get_running_loop() try: await asyncio.wait_for(service_added.wait(), 1) assert service_added.is_set() + # Make sure the startup queries are sent + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + got_query.clear() + now_millis = original_now * 1000 + assert not unexpected_ttl.is_set() + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (expected_ttl * 1000) / 0.80 + time_changed_millis(now_millis) - # Test that we receive queries containing answers only if the remaining TTL - # is greater than half the original TTL - sleep_count = 0 - test_iterations = 50 - - while nbr_answers < test_iterations: - # Increase simulated time shift by 1/4 of the TTL in seconds - time_offset += expected_ttl / 4 - now = _new_current_time_millis() - # Force the next query to be sent since we are testing - # to see if the query contains answers and not the scheduler - browser.query_scheduler._force_reschedule_type(type_, now + (1000 * expected_ttl)) - browser.reschedule_type(type_, now, now) - sleep_count += 1 - await asyncio.wait_for(got_query.wait(), 1) - got_query.clear() - # Prevent the test running indefinitely in an error condition - assert sleep_count < test_iterations * 4 + await asyncio.wait_for(got_query.wait(), 1) assert not unexpected_ttl.is_set() + + assert len(answers) == _services_browser.STARTUP_QUERIES + 1 + # The first question should have no known answers + assert len(answers[0]) == 0 + # The rest of the startup questions should have + # known answers + for answer_list 
in answers[1:-2]: + assert len(answer_list) == 1 + # Once the TTL is reached, the last question should have no known answers + assert len(answers[-1]) == 0 # Don't remove service, allow close() to cleanup finally: await aio_zeroconf_registrar.async_close() @@ -1305,67 +1315,3 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de ('add', '_http._tcp.local.', 'ShellyPro4PM-94B97EC07650._http._tcp.local.'), ('update', '_http._tcp.local.', 'ShellyPro4PM-94B97EC07650._http._tcp.local.'), ] - - -@pytest.mark.asyncio -async def test_service_browser_does_not_try_to_send_if_not_ready(): - """Test that the service browser does not try to send if not ready when rescheduling a type.""" - service_added = asyncio.Event() - type_ = "_http._tcp.local." - registration_name = "nosend.%s" % type_ - - def on_service_state_change(zeroconf, service_type, state_change, name): - if name == registration_name: - if state_change is ServiceStateChange.Added: - service_added.set() - - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) - zeroconf_browser = aiozc.zeroconf - await zeroconf_browser.async_wait_for_start() - - expected_ttl = const._DNS_HOST_TTL - time_offset = 0.0 - - def _new_current_time_millis(): - """Current system time in milliseconds""" - return (time.monotonic() * 1000) + (time_offset * 1000) - - assert len(zeroconf_browser.engine.protocols) == 2 - - aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) - zeroconf_registrar = aio_zeroconf_registrar.zeroconf - await aio_zeroconf_registrar.zeroconf.async_wait_for_start() - assert len(zeroconf_registrar.engine.protocols) == 2 - with patch("zeroconf._services.browser.current_time_millis", _new_current_time_millis): - service_added = asyncio.Event() - browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) - desc = {'path': '/~paulsm/'} - info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] - ) - task = await aio_zeroconf_registrar.async_register_service(info) - await task - - try: - await asyncio.wait_for(service_added.wait(), 1) - time_offset = 1000 * expected_ttl # set the time to the end of the ttl - now = _new_current_time_millis() - browser.query_scheduler._force_reschedule_type(type_, now + (1000 * expected_ttl)) - # Make sure the query schedule is to a time in the future - # so we will reschedule - with patch.object( - browser, "_async_send_ready_queries" - ) as _async_send_ready_queries, patch.object( - browser, "_async_send_ready_queries_schedule_next" - ) as _async_send_ready_queries_schedule_next: - # Reschedule the type to be sent in 1ms in the future - # to make sure the query is not sent - browser.reschedule_type(type_, now, now + 1) - assert not _async_send_ready_queries.called - await asyncio.sleep(0.01) - # Make sure it does happen after the sleep - assert _async_send_ready_queries_schedule_next.called - finally: - await aio_zeroconf_registrar.async_close() - await browser.async_cancel() - await aiozc.async_close() From 3b3fac04f64f861ec358e9f3a37e5ba6a3ab5130 Mon Sep 17 00:00:00 2001 From: "J. 
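The rewritten tests drive the startup phase by advancing a mocked clock with a doubling delay per query instead of sleeping. A small sketch of the schedule that loop produces (STARTUP_QUERIES is 5 at this point in the series; the helper name is illustrative):

STARTUP_QUERIES = 5  # value set earlier in this series


def startup_clock_advances_millis(start_millis: float) -> list:
    """Absolute times the tests advance the mocked loop clock to.

    Mirrors ``now_millis += (2**query_count) * 1000`` from the tests:
    each step doubles the previous delay, so the clock is moved to
    1s, 3s, 7s, 15s and 31s past the starting point.
    """
    times = []
    now_millis = start_millis
    for query_count in range(STARTUP_QUERIES):
        now_millis += (2**query_count) * 1000
        times.append(now_millis)
    return times


assert startup_clock_advances_millis(0) == [1000, 3000, 7000, 15000, 31000]
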
Nick Koston" Date: Thu, 14 Dec 2023 15:20:47 -1000 Subject: [PATCH 37/79] fix: fixes --- tests/test_asyncio.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 00400488..d5994060 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1025,20 +1025,21 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): await aio_zeroconf_registrar.zeroconf.async_wait_for_start() assert len(zeroconf_registrar.engine.protocols) == 2 - # patch the zeroconf send - # patch the zeroconf current_time_millis - # patch the backoff limit to ensure we always get one query every 1/4 of the DNS TTL - # Disable duplicate question suppression and duplicate packet suppression for this test as it works - # by asking the same question over and over + # patch the zeroconf send so we can capture what is being sent with patch.object(zeroconf_browser, "async_send", send): service_added = asyncio.Event() service_removed = asyncio.Event() browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) - - desc = {'path': '/~paulsm/'} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + {'path': '/~paulsm/'}, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) task = await aio_zeroconf_registrar.async_register_service(info) await task From af8b8bc0cfa28bd8a68830d4a6328405ef36e4c0 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:26:45 -1000 Subject: [PATCH 38/79] fix: history --- src/zeroconf/_history.pxd | 10 +++++----- tests/test_history.py | 9 +++++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/zeroconf/_history.pxd b/src/zeroconf/_history.pxd index c1ff7619..02a0fc9e 100644 --- a/src/zeroconf/_history.pxd +++ b/src/zeroconf/_history.pxd @@ -9,10 +9,10 @@ cdef class QuestionHistory: cdef cython.dict _history - cpdef add_question_at_time(self, DNSQuestion question, float now, cython.set known_answers) + cpdef add_question_at_time(self, DNSQuestion question, double now, cython.set known_answers) - @cython.locals(than=cython.double, previous_question=cython.tuple, previous_known_answers=cython.set) - cpdef bint suppresses(self, DNSQuestion question, cython.double now, cython.set known_answers) + @cython.locals(than=double, previous_question=cython.tuple, previous_known_answers=cython.set) + cpdef bint suppresses(self, DNSQuestion question, double now, cython.set known_answers) - @cython.locals(than=cython.double, now_known_answers=cython.tuple) - cpdef async_expire(self, cython.double now) + @cython.locals(than=double, now_known_answers=cython.tuple) + cpdef async_expire(self, double now) diff --git a/tests/test_history.py b/tests/test_history.py index a8b8ae14..fca57be2 100644 --- a/tests/test_history.py +++ b/tests/test_history.py @@ -47,11 +47,16 @@ def test_question_suppression(): def test_question_expire(): history = QuestionHistory() - question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN) now = r.current_time_millis() + question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN) other_known_answers: Set[r.DNSRecord] = { r.DNSPointer( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.' 
+ "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + 10000, + 'known-to-other._hap._tcp.local.', + created=now, ) } history.add_question_at_time(question, now, other_known_answers) From d7c8ecc77c8ee5a916e5c7f54c632b4931c8c215 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:30:34 -1000 Subject: [PATCH 39/79] fix: rename to prevent errors --- src/zeroconf/_services/browser.pxd | 4 +-- src/zeroconf/_services/browser.py | 40 +++++++++++++++--------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 4b444d99..ccf28d35 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -26,7 +26,7 @@ cdef object _FLAGS_QR_QUERY cdef object heappop, heappush -cdef class _ScheduledQuery: +cdef class _ScheduledPTRQuery: cdef public str name cdef public str type_ @@ -60,7 +60,7 @@ cdef class QueryScheduler: cdef double _min_time_between_queries_millis cdef object _loop cdef unsigned int _startup_queries_sent - cdef public dict _next_scheduled_for_name + cdef public dict _next_scheduled_for_alias cdef public list _query_heap cdef object _next_run cdef double _clock_resolution_millis diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 3cb08c6f..421e6852 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -102,44 +102,44 @@ heappush = heapq.heappush -class _ScheduledQuery: +class _ScheduledPTRQuery: - __slots__ = ('name', 'type_', 'cancelled', 'when_millis') + __slots__ = ('alias', 'name', 'cancelled', 'when_millis') - def __init__(self, name: str, type_: str, when_millis: float) -> None: + def __init__(self, alias: str, name: str, when_millis: float) -> None: """Create a scheduled query.""" + self.alias = alias self.name = name - self.type_ = type_ self.cancelled = False self.when_millis = when_millis def __repr__(self) -> str: """Return a string representation of the scheduled query.""" - return f"<{self.__class__.__name__} {self.name} {self.type_} cancelled={self.cancelled} when={self.when_millis}>" + return f"<{self.__class__.__name__} alias={self.alias} name={self.name} cancelled={self.cancelled} when={self.when_millis}>" - def __lt__(self, other: '_ScheduledQuery') -> bool: + def __lt__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" return self.when_millis < other.when_millis def __le__(self, other: Any) -> bool: """Compare two scheduled queries.""" - if isinstance(other, _ScheduledQuery): + if isinstance(other, _ScheduledPTRQuery): return self.when_millis < other.when_millis or self.__eq__(other) return NotImplemented def __eq__(self, other: Any) -> bool: """Compare two scheduled queries.""" - if not isinstance(other, _ScheduledQuery): + if not isinstance(other, _ScheduledPTRQuery): return NotImplemented return self.when_millis == other.when_millis def __ge__(self, other: Any) -> bool: """Compare two scheduled queries.""" - if isinstance(other, _ScheduledQuery): + if isinstance(other, _ScheduledPTRQuery): return self.when_millis > other.when_millis or self.__eq__(other) return NotImplemented - def __gt__(self, other: '_ScheduledQuery') -> bool: + def __gt__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" return self.when_millis > other.when_millis @@ -284,7 +284,7 @@ class QueryScheduler: '_min_time_between_queries_millis', '_loop', '_startup_queries_sent', - '_next_scheduled_for_name', + 
'_next_scheduled_for_alias', '_query_heap', '_next_run', '_clock_resolution_millis', @@ -301,8 +301,8 @@ def __init__( self._min_time_between_queries_millis = delay self._loop: Optional[asyncio.AbstractEventLoop] = None self._startup_queries_sent = 0 - self._next_scheduled_for_name: Dict[str, _ScheduledQuery] = {} - self._query_heap: list[_ScheduledQuery] = [] + self._next_scheduled_for_alias: Dict[str, _ScheduledPTRQuery] = {} + self._query_heap: list[_ScheduledPTRQuery] = [] self._next_run: Optional[asyncio.TimerHandle] = None self._clock_resolution_millis = time.get_clock_info('monotonic').resolution * 1000 @@ -332,19 +332,19 @@ def schedule(self, pointer: DNSPointer) -> None: def _schedule(self, pointer: DNSPointer, expire_time: float_) -> None: """Schedule a query for a pointer.""" - scheduled_query = _ScheduledQuery(pointer.alias, pointer.name, expire_time) - self._next_scheduled_for_name[pointer.alias] = scheduled_query + scheduled_query = _ScheduledPTRQuery(pointer.alias, pointer.name, expire_time) + self._next_scheduled_for_alias[pointer.alias] = scheduled_query heappush(self._query_heap, scheduled_query) def cancel(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" - scheduled = self._next_scheduled_for_name.pop(pointer.alias, None) + scheduled = self._next_scheduled_for_alias.pop(pointer.alias, None) if scheduled: scheduled.cancelled = True def reschedule(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" - current = self._next_scheduled_for_name.get(pointer.alias) + current = self._next_scheduled_for_alias.get(pointer.alias) expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) if current is not None: # If the expire time is within self._min_time_between_queries_millis @@ -387,7 +387,7 @@ def _process_ready_types(self) -> None: # which defaults to 10s ready_types: Set[str] = set() - next_scheduled: Optional[_ScheduledQuery] = None + next_scheduled: Optional[_ScheduledPTRQuery] = None end_time_millis = now_millis + self._clock_resolution_millis while self._query_heap: @@ -399,8 +399,8 @@ def _process_ready_types(self) -> None: next_scheduled = query break query = heappop(self._query_heap) - del self._next_scheduled_for_name[query.name] - ready_types.add(query.type_) + del self._next_scheduled_for_alias[query.alias] + ready_types.add(query.name) if ready_types: self._browser.async_send_ready_queries(False, now_millis, ready_types) From 846a6e7a51c2ddb8ccdc0ec53da28e89a5f71e01 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:41:25 -1000 Subject: [PATCH 40/79] fix: refactor test --- tests/services/test_browser.py | 264 ++++++++++++--------------------- 1 file changed, 96 insertions(+), 168 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index bc9e2b3e..5d4fafee 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -10,7 +10,7 @@ import time import unittest from threading import Event -from typing import Iterable, Set, cast +from typing import Iterable, List, Set, cast from unittest.mock import patch import pytest @@ -29,13 +29,14 @@ from zeroconf._services import ServiceStateChange from zeroconf._services.browser import ServiceBrowser from zeroconf._services.info import ServiceInfo -from zeroconf.asyncio import AsyncZeroconf +from zeroconf.asyncio import AsyncServiceBrowser, AsyncZeroconf from .. 
import ( QuestionHistoryWithoutSuppression, _inject_response, _wait_for_start, has_working_ipv6, + time_changed_millis, ) log = logging.getLogger('zeroconf') @@ -472,93 +473,6 @@ def _mock_get_expiration_time(self, percent): zeroconf.close() -def test_backoff(): - got_query = Event() - - type_ = "_http._tcp.local." - zeroconf_browser = Zeroconf(interfaces=['127.0.0.1']) - _wait_for_start(zeroconf_browser) - zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() - - # we are going to patch the zeroconf send to check query transmission - old_send = zeroconf_browser.async_send - - time_offset = 0.0 - start_time = time.monotonic() * 1000 - initial_query_interval = _services_browser._BROWSER_TIME / 1000 - - def _current_time_millis(): - """Current system time in milliseconds""" - return start_time + time_offset * 1000 - - def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): - """Sends an outgoing packet.""" - got_query.set() - old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope) - - class ServiceBrowserWithPatchedTime(_services_browser.ServiceBrowser): - def _async_start(self) -> None: - """Generate the next time and setup listeners. - - Must be called by uses of this base class after they - have finished setting their properties. - """ - super()._async_start() - self.query_scheduler.start(_current_time_millis()) - - def _async_send_ready_queries_schedule_next(self): - if self.done or self.zc.done: - return - now = _current_time_millis() - self._async_send_ready_queries(now) - self._async_schedule_next(now) - - # patch the zeroconf send - # patch the zeroconf current_time_millis - # patch the backoff limit to prevent test running forever - with patch.object(zeroconf_browser, "async_send", send), patch.object( - _services_browser, "_FIRST_QUERY_DELAY_RANDOM_INTERVAL", (0, 0) - ): - # dummy service callback - def on_service_state_change(zeroconf, service_type, state_change, name): - pass - - browser = ServiceBrowserWithPatchedTime(zeroconf_browser, type_, [on_service_state_change]) - - try: - # Test that queries are sent at increasing intervals - sleep_count = 0 - next_query_interval = 0.0 - expected_query_time = 0.0 - while True: - sleep_count += 1 - got_query.wait(0.1) - if time_offset == expected_query_time: - assert got_query.is_set() - got_query.clear() - if next_query_interval == 2**_services_browser.STARTUP_QUERIES: - # Only need to test up to the point where we've seen a query - # after the backoff limit has been hit - break - elif next_query_interval == 0: - next_query_interval = initial_query_interval - expected_query_time = initial_query_interval - else: - next_query_interval = min( - 2 * next_query_interval, 2**_services_browser.STARTUP_QUERIES - ) - expected_query_time += next_query_interval - else: - assert not got_query.is_set() - time_offset += initial_query_interval - assert zeroconf_browser.loop is not None - zeroconf_browser.loop.call_soon_threadsafe(browser._async_send_ready_queries_schedule_next) - - finally: - browser.cancel() - zeroconf_browser.close() - - def test_first_query_delay(): """Verify the first query is delayed. @@ -598,41 +512,102 @@ def on_service_state_change(zeroconf, service_type, state_change, name): zeroconf_browser.close() -def test_asking_default_is_asking_qm_questions_after_the_first_qu(): - """Verify the service browser's first question is QU and subsequent ones are QM questions.""" - type_ = "_quservice._tcp.local." 
- zeroconf_browser = Zeroconf(interfaces=['127.0.0.1']) +@pytest.mark.asyncio +async def test_asking_default_is_asking_qm_questions_after_the_first_qu(): + """Verify the service browser's first questions are QU and refresh queries are QM.""" + service_added = asyncio.Event() + service_removed = asyncio.Event() + unexpected_ttl = asyncio.Event() + got_query = asyncio.Event() - # we are going to patch the zeroconf send to check query transmission + type_ = "_http._tcp.local." + registration_name = "xxxyyy.%s" % type_ + + def on_service_state_change(zeroconf, service_type, state_change, name): + if name == registration_name: + if state_change is ServiceStateChange.Added: + service_added.set() + elif state_change is ServiceStateChange.Removed: + service_removed.set() + + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() + await zeroconf_browser.async_wait_for_start() + + # we are going to patch the zeroconf send to check packet sizes old_send = zeroconf_browser.async_send - first_outgoing = None - second_outgoing = None + expected_ttl = const._DNS_OTHER_TTL + questions: List[List[DNSQuestion]] = [] - def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" - nonlocal first_outgoing - nonlocal second_outgoing - if first_outgoing is not None and second_outgoing is None: # type: ignore[unreachable] - second_outgoing = out # type: ignore[unreachable] - if first_outgoing is None: - first_outgoing = out - old_send(out, addr=addr, port=port) + pout = r.DNSIncoming(out.packets()[0]) + questions.append(pout.questions) + got_query.set() + old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope) - # patch the zeroconf send - with patch.object(zeroconf_browser, "async_send", send): - # dummy service callback - def on_service_state_change(zeroconf, service_type, state_change, name): - pass + assert len(zeroconf_browser.engine.protocols) == 2 + + aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_registrar = aio_zeroconf_registrar.zeroconf + await aio_zeroconf_registrar.zeroconf.async_wait_for_start() - browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change], delay=5) - time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 120 + 50)) + assert len(zeroconf_registrar.engine.protocols) == 2 + # patch the zeroconf send so we can capture what is being sent + with patch.object(zeroconf_browser, "async_send", send): + service_added = asyncio.Event() + service_removed = asyncio.Event() + + browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) + info = ServiceInfo( + type_, + registration_name, + 80, + 0, + 0, + {'path': '/~paulsm/'}, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], + ) + task = await aio_zeroconf_registrar.async_register_service(info) + await task + loop = asyncio.get_running_loop() try: - assert first_outgoing.questions[0].unicast is True # type: ignore[union-attr] - assert second_outgoing.questions[0].unicast is False # type: ignore[attr-defined] + await asyncio.wait_for(service_added.wait(), 1) + assert service_added.is_set() + # Make sure the startup queries are sent + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + 
time_changed_millis(now_millis) + + got_query.clear() + now_millis = original_now * 1000 + assert not unexpected_ttl.is_set() + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (expected_ttl * 1000) / 0.80 + time_changed_millis(now_millis) + + await asyncio.wait_for(got_query.wait(), 1) + assert not unexpected_ttl.is_set() + + assert len(questions) == _services_browser.STARTUP_QUERIES + 1 + # The startup questions should be QU questions + for question in questions[0:-2]: + assert question[0].unicast is True + # The refresh questions should be QM questions + assert questions[-1][0].unicast is False + # Don't remove service, allow close() to cleanup finally: - browser.cancel() - zeroconf_browser.close() + await aio_zeroconf_registrar.async_close() + await asyncio.wait_for(service_removed.wait(), 1) + assert service_removed.is_set() + await browser.async_cancel() + await aiozc.async_close() def test_asking_qm_questions(): @@ -1010,86 +985,39 @@ async def test_generate_service_query_suppress_duplicate_questions(): assert zc.question_history.suppresses(question, now, other_known_answers) # The known answer list is different, do not suppress - outs = _services_browser.generate_service_query(zc, now, [name], multicast=True, question_type=None) + outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None) assert outs zc.cache.async_add_records([answer]) # The known answer list contains all the asked questions in the history # we should suppress - outs = _services_browser.generate_service_query(zc, now, [name], multicast=True, question_type=None) + outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None) assert not outs # We do not suppress once the question history expires outs = _services_browser.generate_service_query( - zc, now + 1000, [name], multicast=True, question_type=None + zc, now + 1000, {name}, multicast=True, question_type=None ) assert outs # We do not suppress QU queries ever - outs = _services_browser.generate_service_query(zc, now, [name], multicast=False, question_type=None) + outs = _services_browser.generate_service_query(zc, now, {name}, multicast=False, question_type=None) assert outs zc.question_history.async_expire(now + 2000) # No suppression after clearing the history - outs = _services_browser.generate_service_query(zc, now, [name], multicast=True, question_type=None) + outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None) assert outs # The previous query we just sent is still remembered and # the next one is suppressed - outs = _services_browser.generate_service_query(zc, now, [name], multicast=True, question_type=None) + outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None) assert not outs await aiozc.async_close() -@pytest.mark.asyncio -async def test_query_scheduler(): - delay = const._BROWSER_TIME - types_ = {"_hap._tcp.local.", "_http._tcp.local."} - query_scheduler = _services_browser.QueryScheduler(types_, delay, (0, 0)) - - now = current_time_millis() - query_scheduler.start(now) - - # Test query interval is increasing - assert query_scheduler.millis_to_wait(now - 1) == 1 - assert query_scheduler.millis_to_wait(now) == 0 - assert query_scheduler.millis_to_wait(now + 1) == 0 - - assert set(query_scheduler.process_ready_types(now)) == types_ - assert set(query_scheduler.process_ready_types(now)) == set() - assert 
query_scheduler.millis_to_wait(now) == pytest.approx(delay, 0.00001) - - assert set(query_scheduler.process_ready_types(now + delay)) == types_ - assert set(query_scheduler.process_ready_types(now + delay)) == set() - assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 3, 0.00001) - - assert set(query_scheduler.process_ready_types(now + delay * 3)) == types_ - assert set(query_scheduler.process_ready_types(now + delay * 3)) == set() - assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 7, 0.00001) - - assert set(query_scheduler.process_ready_types(now + delay * 7)) == types_ - assert set(query_scheduler.process_ready_types(now + delay * 7)) == set() - assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 15, 0.00001) - - assert set(query_scheduler.process_ready_types(now + delay * 15)) == types_ - assert set(query_scheduler.process_ready_types(now + delay * 15)) == set() - - # Test if we reschedule 1 second later, the millis_to_wait goes up by 1 - query_scheduler.reschedule_type("_hap._tcp.local.", now + delay * 16) - assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 16, 0.00001) - - assert set(query_scheduler.process_ready_types(now + delay * 15)) == set() - - # Test if we reschedule 1 second later... and its ready for processing - assert set(query_scheduler.process_ready_types(now + delay * 16)) == {"_hap._tcp.local."} - assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 31, 0.00001) - assert set(query_scheduler.process_ready_types(now + delay * 20)) == set() - - assert set(query_scheduler.process_ready_types(now + delay * 31)) == {"_http._tcp.local."} - - def test_service_browser_matching(): """Test that the ServiceBrowser matching does not match partial names.""" From e054a9848887515c09b5028aed2050068ade0876 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:48:33 -1000 Subject: [PATCH 41/79] fix: move guards --- src/zeroconf/_services/browser.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 421e6852..0d7ecc30 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -361,6 +361,9 @@ def reschedule(self, pointer: DNSPointer) -> None: def _process_startup_queries(self) -> None: if TYPE_CHECKING: assert self._loop is not None + if self._browser.zc.done: + return + now_millis = current_time_millis() # At first we will send 3 queries to get the cache populated @@ -380,6 +383,9 @@ def _process_ready_types(self) -> None: """Generate a list of ready types that is due and schedule the next time.""" if TYPE_CHECKING: assert self._loop is not None + if self._browser.zc.done: + return + now_millis = current_time_millis() # Refresh records that are about to expire (aka # _EXPIRE_REFRESH_TIME_PERCENT which is currently 75% of the TTL) and @@ -628,8 +634,6 @@ def async_send_ready_queries( self, first_request: bool, now_millis: float_, ready_types: Set[str] ) -> None: """Send any ready queries.""" - if self.done or self.zc.done: - return # If they did not specify and this is the first request, ask QU questions # https://datatracker.ietf.org/doc/html/rfc6762#section-5.4 since we are # just starting up and we know our cache is likely empty. This ensures From af1a2322ff37854fe1b664e245921b47adbf4753 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 15:49:32 -1000 Subject: [PATCH 42/79] fix: move guards --- src/zeroconf/_services/browser.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 0d7ecc30..21546f22 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -361,6 +361,8 @@ def reschedule(self, pointer: DNSPointer) -> None: def _process_startup_queries(self) -> None: if TYPE_CHECKING: assert self._loop is not None + # This is a safety to ensure we stop sending queries if Zeroconf instance + # is stopped without the browser being cancelled if self._browser.zc.done: return @@ -383,6 +385,8 @@ def _process_ready_types(self) -> None: """Generate a list of ready types that is due and schedule the next time.""" if TYPE_CHECKING: assert self._loop is not None + # This is a safety to ensure we stop sending queries if Zeroconf instance + # is stopped without the browser being cancelled if self._browser.zc.done: return From 43716e63a5b8e6515ba8b3c6f23b98007066e67f Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:51:48 -1000 Subject: [PATCH 43/79] fix: remove dead code --- src/zeroconf/_services/browser.pxd | 1 - src/zeroconf/_services/browser.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index ccf28d35..c1c73453 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -94,7 +94,6 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cdef public object _service_state_changed cdef public QueryScheduler query_scheduler cdef public bint done - cdef public bint _first_request cdef public object _next_send_timer cdef public object _query_sender_task diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 21546f22..b8329a12 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -441,7 +441,6 @@ class _ServiceBrowserBase(RecordUpdateListener): '_service_state_changed', 'query_scheduler', 'done', - '_first_request', '_next_send_timer', '_query_sender_task', ) @@ -491,7 +490,6 @@ def __init__( self._service_state_changed = Signal() self.query_scheduler = QueryScheduler(self, delay, _FIRST_QUERY_DELAY_RANDOM_INTERVAL) self.done = False - self._first_request: bool = True self._next_send_timer: Optional[asyncio.TimerHandle] = None self._query_sender_task: Optional[asyncio.Task] = None @@ -645,7 +643,6 @@ def async_send_ready_queries( question_type = QU_QUESTION if self.question_type is None and first_request else self.question_type outs = generate_service_query(self.zc, now_millis, ready_types, self.multicast, question_type) if outs: - self._first_request = False for out in outs: self.zc.async_send(out, self.addr, self.port) From d904d10a8a007906696f8456a3f917a8fdb951ff Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 15:53:29 -1000 Subject: [PATCH 44/79] fix: legacy --- tests/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index b93be761..cbba6073 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -23,7 +23,7 @@ import socket import time from functools import lru_cache -from typing import List, Set +from typing import List, Optional, Set from unittest import mock import ifaddr @@ -89,7 +89,7 @@ def _clear_cache(zc: Zeroconf) -> None: zc.question_history.clear() -def time_changed_millis(millis: float | None = None) -> None: +def time_changed_millis(millis: Optional[float] = None) -> None: """Call all scheduled events for a time.""" loop = asyncio.get_running_loop() loop_time = loop.time() From dc938551fe10872ce245a5f4d3e29e8667708a4b Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:55:41 -1000 Subject: [PATCH 45/79] fix: legacy --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index b8329a12..eb4ceff6 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -408,7 +408,7 @@ def _process_ready_types(self) -> None: if query.when_millis > end_time_millis: next_scheduled = query break - query = heappop(self._query_heap) + heappop(self._query_heap) del self._next_scheduled_for_alias[query.alias] ready_types.add(query.name) From e89f64e893c69acafcf1593be2f1577a7aa8daba Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 15:57:42 -1000 Subject: [PATCH 46/79] fix: break ref cycles on stop --- src/zeroconf/_services/browser.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index eb4ceff6..5858fb79 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -325,6 +325,8 @@ def stop(self) -> None: if self._next_run is not None: self._next_run.cancel() self._next_run = None + self._next_scheduled_for_alias.clear() + self._query_heap.clear() def schedule(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" From c69b8906a4252ee5deaf8ceb40f5394a8ac7170b Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Thu, 14 Dec 2023 16:21:56 -1000 Subject: [PATCH 47/79] fix: debug --- src/zeroconf/_services/browser.pxd | 6 +++--- src/zeroconf/_services/browser.py | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index c1c73453..80faf0b6 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -69,15 +69,15 @@ cdef class QueryScheduler: cdef void _schedule(self, DNSPointer pointer, double expire_time) - @cython.locals(scheduled=_ScheduledQuery) + @cython.locals(scheduled=_ScheduledPTRQuery) cpdef void cancel(self, DNSPointer pointer) - @cython.locals(current=_ScheduledQuery, expire_time=double) + @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule(self, DNSPointer pointer) cpdef void _process_startup_queries(self) - @cython.locals(query=_ScheduledQuery, next_scheduled=_ScheduledQuery, next_when=double) + @cython.locals(query=_ScheduledPTRQuery, next_scheduled=_ScheduledPTRQuery, next_when=double) cpdef void _process_ready_types(self) cdef class _ServiceBrowserBase(RecordUpdateListener): diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 5858fb79..eb3f0561 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -561,17 +561,23 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record record_type = record.type if record_type is _TYPE_PTR: + import pprint + + pprint.pprint(['got new record', record_type, record]) if TYPE_CHECKING: record = cast(DNSPointer, record) pointer = record for type_ in self.types.intersection(cached_possible_types(pointer.name)): if old_record is None: + pprint.pprint(['added', pointer]) self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias) self.query_scheduler.schedule(pointer) elif pointer.is_expired(now): + pprint.pprint(['removed', pointer]) self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias) self.query_scheduler.cancel(pointer) else: + pprint.pprint(['updated', pointer]) self.query_scheduler.reschedule(pointer) continue From 605f0f81d8c3c74f4384782c41f78781010eaad5 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Thu, 14 Dec 2023 16:23:45 -1000 Subject: [PATCH 48/79] fix: names --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 80faf0b6..93bd8af8 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -28,8 +28,8 @@ cdef object heappop, heappush cdef class _ScheduledPTRQuery: + cdef public str alias cdef public str name - cdef public str type_ cdef public bint cancelled cdef public double when_millis From 9f8a2f1fbaebeffabbf60fc6441e7eb6b5cd5781 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 08:29:13 -1000 Subject: [PATCH 49/79] fix: handle devices that ignore QU --- src/zeroconf/_services/browser.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index eb3f0561..3d7ba204 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -86,7 +86,7 @@ QU_QUESTION = DNSQuestionType.QU -STARTUP_QUERIES = 5 +STARTUP_QUERIES = 4 if TYPE_CHECKING: from .._core import Zeroconf @@ -370,8 +370,10 @@ def _process_startup_queries(self) -> None: now_millis = current_time_millis() - # At first we will send 3 queries to get the cache populated - self._browser.async_send_ready_queries(True, now_millis, self._browser.types) + # At first we will send STARTUP_QUERIES queries to get the cache populated + self._browser.async_send_ready_queries( + self._startup_queries_sent == 0, now_millis, self._browser.types + ) self._startup_queries_sent += 1 # Once we finish sending the initial queries we will From 59adfbca950e26a6d428fbe6c797b1a5119163a3 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 08:33:47 -1000 Subject: [PATCH 50/79] fix: revert QU change --- tests/services/test_browser.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 5d4fafee..75ad3a51 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -596,11 +596,15 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert not unexpected_ttl.is_set() assert len(questions) == _services_browser.STARTUP_QUERIES + 1 - # The startup questions should be QU questions - for question in questions[0:-2]: - assert question[0].unicast is True - # The refresh questions should be QM questions - assert questions[-1][0].unicast is False + # The first question should be QU to try to + # populate the known answers and limit the impact + # of the QM questions that follow. We still + # have to ask QM questions for the startup queries + # because some devices will not respond to QU + assert questions[0][0].unicast is True + # The remaining questions should be QM questions + for question in questions[1:]: + assert question[0].unicast is False # Don't remove service, allow close() to cleanup finally: await aio_zeroconf_registrar.async_close() From 8037cedd01800bce6cb2396ab1a4518207a51484 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 08:34:21 -1000 Subject: [PATCH 51/79] fix: remove debug --- src/zeroconf/_services/browser.py | 6 ------ tests/test_asyncio.py | 3 --- 2 files changed, 9 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 3d7ba204..d0a71d32 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -563,23 +563,17 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record record_type = record.type if record_type is _TYPE_PTR: - import pprint - - pprint.pprint(['got new record', record_type, record]) if TYPE_CHECKING: record = cast(DNSPointer, record) pointer = record for type_ in self.types.intersection(cached_possible_types(pointer.name)): if old_record is None: - pprint.pprint(['added', pointer]) self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias) self.query_scheduler.schedule(pointer) elif pointer.is_expired(now): - pprint.pprint(['removed', pointer]) self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias) self.query_scheduler.cancel(pointer) else: - pprint.pprint(['updated', pointer]) self.query_scheduler.reschedule(pointer) continue diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index d5994060..77b03756 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1002,9 +1002,6 @@ def on_service_state_change(zeroconf, service_type, state_change, name): def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" pout = DNSIncoming(out.packets()[0]) - import pprint - - pprint.pprint(pout) last_answers = pout.answers() answers.append(last_answers) From c9262a55949bd75a2ac2fe8d17b870a4dd4670da Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 09:58:13 -1000 Subject: [PATCH 52/79] feat: retry queries until ttl reached --- src/zeroconf/_services/browser.pxd | 16 +++++-- src/zeroconf/_services/browser.py | 71 +++++++++++++++++++++++------- 2 files changed, 67 insertions(+), 20 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 93bd8af8..9492cdcf 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -17,6 +17,7 @@ cdef cython.uint _TYPE_PTR cdef object _CLASS_IN cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED cdef cython.set _ADDRESS_RECORD_TYPES +cdef float RESCUE_RECORD_RETRY_TTL_PERCENTAGE cdef object _MDNS_PORT, _BROWSER_TIME @@ -26,11 +27,14 @@ cdef object _FLAGS_QR_QUERY cdef object heappop, heappush +cdef double _first_refresh_time(double expire_time_millis) + cdef class _ScheduledPTRQuery: cdef public str alias cdef public str name cdef public bint cancelled + cdef public double expire_time_millis cdef public double when_millis cdef class _DNSPointerOutgoingBucket: @@ -65,15 +69,19 @@ cdef class QueryScheduler: cdef object _next_run cdef double _clock_resolution_millis - cpdef void schedule(self, DNSPointer pointer) + cpdef void schedule_pointer_first_refresh(self, DNSPointer pointer) + + cdef void _schedule_pointer_first_refresh(self, DNSPointer pointer, double expire_time) - cdef void _schedule(self, DNSPointer pointer, double expire_time) + cdef void _schedule_ptr_query(self, _ScheduledPTRQuery scheduled_query) @cython.locals(scheduled=_ScheduledPTRQuery) - cpdef void cancel(self, DNSPointer pointer) + cpdef void cancel_pointer_refresh(self, DNSPointer pointer) @cython.locals(current=_ScheduledPTRQuery, expire_time=double) - cpdef void reschedule(self, DNSPointer pointer) + cpdef void reschedule_pointer_first_refresh(self, DNSPointer pointer) + + cpdef reschedule_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index d0a71d32..4365da1a 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -88,6 +88,8 @@ STARTUP_QUERIES = 4 +RESCUE_RECORD_RETRY_TTL_PERCENTAGE = 0.1 + if TYPE_CHECKING: from .._core import Zeroconf @@ -102,15 +104,21 @@ heappush = heapq.heappush +def _first_refresh_time(expire_time_millis: float_) -> float_: + """Return the refresh time from an expire time.""" + return expire_time_millis * (_EXPIRE_REFRESH_TIME_PERCENT / 100) + + class _ScheduledPTRQuery: - __slots__ = ('alias', 'name', 'cancelled', 'when_millis') + __slots__ = ('alias', 'name', 'cancelled', 'expire_time_millis', 'when_millis') - def __init__(self, alias: str, name: str, when_millis: float) -> None: + def __init__(self, alias: str, name: str, expire_time_millis: float, when_millis: float) -> None: """Create a scheduled query.""" self.alias = alias self.name = name self.cancelled = False + self.expire_time_millis = expire_time_millis self.when_millis = when_millis def __repr__(self) -> str: @@ -328,37 +336,63 @@ def stop(self) -> None: self._next_scheduled_for_alias.clear() self._query_heap.clear() - def schedule(self, pointer: DNSPointer) -> None: + def schedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" - self._schedule(pointer, pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT)) + 
self._schedule_pointer_first_refresh(pointer, pointer.get_expiration_time(100)) - def _schedule(self, pointer: DNSPointer, expire_time: float_) -> None: + def _schedule_pointer_first_refresh(self, pointer: DNSPointer, expire_time_millis: float_) -> None: """Schedule a query for a pointer.""" - scheduled_query = _ScheduledPTRQuery(pointer.alias, pointer.name, expire_time) - self._next_scheduled_for_alias[pointer.alias] = scheduled_query + refresh_time_millis = _first_refresh_time(expire_time_millis) + scheduled_ptr_query = _ScheduledPTRQuery( + pointer.alias, pointer.name, expire_time_millis, refresh_time_millis + ) + self._schedule_ptr_query(scheduled_ptr_query) + + def _schedule_ptr_query(self, scheduled_query: _ScheduledPTRQuery) -> None: + """Schedule a query for a pointer.""" + self._next_scheduled_for_alias[scheduled_query.alias] = scheduled_query heappush(self._query_heap, scheduled_query) - def cancel(self, pointer: DNSPointer) -> None: + def cancel_pointer_refresh(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" scheduled = self._next_scheduled_for_alias.pop(pointer.alias, None) if scheduled: scheduled.cancelled = True - def reschedule(self, pointer: DNSPointer) -> None: + def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" current = self._next_scheduled_for_alias.get(pointer.alias) - expire_time = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) + expire_time_millis = pointer.get_expiration_time(100) + refresh_time_millis = _first_refresh_time(expire_time_millis) if current is not None: # If the expire time is within self._min_time_between_queries_millis # of the current scheduled time avoid churn by not rescheduling if ( -self._min_time_between_queries_millis - <= expire_time - current.when_millis + <= refresh_time_millis - current.when_millis <= self._min_time_between_queries_millis ): return current.cancelled = True - self._schedule(pointer, expire_time) + self._schedule_pointer_first_refresh(pointer, expire_time_millis) + + def reschedule_query( + self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ + ) -> None: + """Reschedule a query for a pointer at an additional percentage of expiration.""" + next_percent_remaining = additional_percentage + ( + (query.expire_time_millis - now_millis) / query.expire_time_millis + ) + if next_percent_remaining >= 1: + # If we would schedule past the expire time + # there is no point in scheduling as we already + # tried to rescue the record and failed + return + next_query_time = query.expire_time_millis * next_percent_remaining + scheduled_ptr_query = _ScheduledPTRQuery( + query.alias, query.name, query.expire_time_millis, next_query_time + ) + self._schedule_ptr_query(scheduled_ptr_query) def _process_startup_queries(self) -> None: if TYPE_CHECKING: @@ -412,9 +446,14 @@ def _process_ready_types(self) -> None: if query.when_millis > end_time_millis: next_scheduled = query break + ready_types.add(query.name) heappop(self._query_heap) del self._next_scheduled_for_alias[query.alias] - ready_types.add(query.name) + # If there is still more than 10% of the TTL remaining + # schedule a query again to try to recuse the record + # from expiring. If the record is not refreshed before + # the query, the query will get cancelled. 
+ self.reschedule_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) if ready_types: self._browser.async_send_ready_queries(False, now_millis, ready_types) @@ -569,12 +608,12 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record for type_ in self.types.intersection(cached_possible_types(pointer.name)): if old_record is None: self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias) - self.query_scheduler.schedule(pointer) + self.query_scheduler.schedule_pointer_first_refresh(pointer) elif pointer.is_expired(now): self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias) - self.query_scheduler.cancel(pointer) + self.query_scheduler.cancel_pointer_refresh(pointer) else: - self.query_scheduler.reschedule(pointer) + self.query_scheduler.reschedule_pointer_first_refresh(pointer) continue # If its expired or already exists in the cache it cannot be updated. From 4588f018302c37d6f3e70f2a271ae2955c391118 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 09:59:45 -1000 Subject: [PATCH 53/79] feat: retry queries until ttl reached --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 9492cdcf..b29bec86 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -81,7 +81,7 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule_pointer_first_refresh(self, DNSPointer pointer) - cpdef reschedule_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) + cpdef void reschedule_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) From 3910b0de44e7a67ade72bfa6b606bc5c2665c7c1 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 10:01:19 -1000 Subject: [PATCH 54/79] feat: retry queries until ttl reached --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 4365da1a..2ace3793 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -451,7 +451,7 @@ def _process_ready_types(self) -> None: del self._next_scheduled_for_alias[query.alias] # If there is still more than 10% of the TTL remaining # schedule a query again to try to recuse the record - # from expiring. If the record is not refreshed before + # from expiring. If the record is refreshed before # the query, the query will get cancelled. self.reschedule_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) From 615bc8b6c70dd4f3925a892171413af7cad8262b Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 10:13:12 -1000 Subject: [PATCH 55/79] chore: comments --- src/zeroconf/_services/browser.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2ace3793..efe43ff2 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -117,8 +117,20 @@ def __init__(self, alias: str, name: str, expire_time_millis: float, when_millis """Create a scheduled query.""" self.alias = alias self.name = name + # Since queries are stored in a heap we need to track if they are cancelled + # so we can remove them from the heap when they are cancelled as it would + # be too expensive to search the heap for the record to remove and instead + # we just mark it as cancelled and ignore it when we pop it off the heap + # when the query is due. self.cancelled = False + # Expire time millis is the actual millisecond time the record will expire self.expire_time_millis = expire_time_millis + # When millis is the millisecond time the query should be sent + # For the first query this is the refresh time which is 75% of the TTL + # + # For subsequent queries we increase the time by 10% of the TTL + # until we reach the expire time and then we stop because it means + # we failed to rescue the record. self.when_millis = when_millis def __repr__(self) -> str: From 38bb27816a7142325a1918bf71fad93c2a8eb2bd Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 10:24:17 -1000 Subject: [PATCH 56/79] fix: tweak --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index efe43ff2..4eeb2b35 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -458,7 +458,6 @@ def _process_ready_types(self) -> None: if query.when_millis > end_time_millis: next_scheduled = query break - ready_types.add(query.name) heappop(self._query_heap) del self._next_scheduled_for_alias[query.alias] # If there is still more than 10% of the TTL remaining @@ -466,6 +465,7 @@ def _process_ready_types(self) -> None: # from expiring. If the record is refreshed before # the query, the query will get cancelled. self.reschedule_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) + ready_types.add(query.name) if ready_types: self._browser.async_send_ready_queries(False, now_millis, ready_types) From e978d6cd68ecd88269ce5ffac9970c4631f66fcc Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 10:25:42 -1000 Subject: [PATCH 57/79] fix: cleanup --- src/zeroconf/_services/browser.pxd | 2 +- src/zeroconf/_services/browser.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index b29bec86..68749b2b 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -81,7 +81,7 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule_pointer_first_refresh(self, DNSPointer pointer) - cpdef void reschedule_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) + cpdef void schedule_next_refresh_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 4eeb2b35..0f7dcd68 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -388,7 +388,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: current.cancelled = True self._schedule_pointer_first_refresh(pointer, expire_time_millis) - def reschedule_query( + def schedule_next_refresh_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ ) -> None: """Reschedule a query for a pointer at an additional percentage of expiration.""" @@ -464,7 +464,7 @@ def _process_ready_types(self) -> None: # schedule a query again to try to recuse the record # from expiring. If the record is refreshed before # the query, the query will get cancelled. - self.reschedule_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) + self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) ready_types.add(query.name) if ready_types: From 0082e417848565462b4a24d805e51d8e32055404 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 10:30:15 -1000 Subject: [PATCH 58/79] fix: tweaks --- src/zeroconf/_services/browser.pxd | 2 +- src/zeroconf/_services/browser.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 68749b2b..e26db223 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -71,7 +71,7 @@ cdef class QueryScheduler: cpdef void schedule_pointer_first_refresh(self, DNSPointer pointer) - cdef void _schedule_pointer_first_refresh(self, DNSPointer pointer, double expire_time) + cdef void _schedule_pointer_refresh(self, DNSPointer pointer, double expire_time_millis, double refresh_time_millis) cdef void _schedule_ptr_query(self, _ScheduledPTRQuery scheduled_query) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 0f7dcd68..bdf4c774 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -350,11 +350,14 @@ def stop(self) -> None: def schedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" - self._schedule_pointer_first_refresh(pointer, pointer.get_expiration_time(100)) + expire_time_millis = pointer.get_expiration_time(100) + refresh_time_millis = _first_refresh_time(expire_time_millis) + self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) - def _schedule_pointer_first_refresh(self, pointer: DNSPointer, expire_time_millis: float_) -> None: + def _schedule_pointer_refresh( + self, pointer: DNSPointer, expire_time_millis: float_, refresh_time_millis: float_ + ) -> None: """Schedule a query for a pointer.""" - refresh_time_millis = _first_refresh_time(expire_time_millis) scheduled_ptr_query = _ScheduledPTRQuery( pointer.alias, pointer.name, expire_time_millis, refresh_time_millis ) @@ -386,7 +389,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: ): return current.cancelled = True - self._schedule_pointer_first_refresh(pointer, expire_time_millis) + self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) def schedule_next_refresh_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ @@ -464,7 +467,7 @@ def _process_ready_types(self) -> None: # schedule a query again to try to recuse the record # from expiring. If the record is refreshed before # the query, the query will get cancelled. - self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) + # self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) ready_types.add(query.name) if ready_types: From d1f7e080e0e3c8f1065f8377feef1bd3994b8849 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 10:49:37 -1000 Subject: [PATCH 59/79] fix: fixes --- src/zeroconf/_services/browser.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index bdf4c774..00993f8f 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -104,11 +104,6 @@ heappush = heapq.heappush -def _first_refresh_time(expire_time_millis: float_) -> float_: - """Return the refresh time from an expire time.""" - return expire_time_millis * (_EXPIRE_REFRESH_TIME_PERCENT / 100) - - class _ScheduledPTRQuery: __slots__ = ('alias', 'name', 'cancelled', 'expire_time_millis', 'when_millis') @@ -135,7 +130,15 @@ def __init__(self, alias: str, name: str, expire_time_millis: float, when_millis def __repr__(self) -> str: """Return a string representation of the scheduled query.""" - return f"<{self.__class__.__name__} alias={self.alias} name={self.name} cancelled={self.cancelled} when={self.when_millis}>" + return ( + f"<{self.__class__.__name__} " + f"alias={self.alias} " + f"name={self.name} " + f"cancelled={self.cancelled} " + f"expire_time_millis={self.expire_time_millis} " + f"when_millis={self.when_millis}" + ">" + ) def __lt__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" @@ -351,7 +354,7 @@ def stop(self) -> None: def schedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" expire_time_millis = pointer.get_expiration_time(100) - refresh_time_millis = _first_refresh_time(expire_time_millis) + refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) def _schedule_pointer_refresh( @@ -378,7 +381,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" current = self._next_scheduled_for_alias.get(pointer.alias) expire_time_millis = pointer.get_expiration_time(100) - refresh_time_millis = _first_refresh_time(expire_time_millis) + refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) if current is not None: # If the expire time is within self._min_time_between_queries_millis # of the current scheduled time avoid churn by not rescheduling @@ -395,15 +398,14 @@ def schedule_next_refresh_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ ) -> None: """Reschedule a query for a pointer at an additional percentage of expiration.""" - next_percent_remaining = additional_percentage + ( - (query.expire_time_millis - now_millis) / query.expire_time_millis - ) - if next_percent_remaining >= 1: + percentage_remaining = (query.expire_time_millis - now_millis) / query.expire_time_millis + if percentage_remaining < additional_percentage: # If we would schedule past the expire time # there is no point in scheduling as we already # tried to rescue the record and failed return - next_query_time = query.expire_time_millis * next_percent_remaining + next_percent_remaining = percentage_remaining - additional_percentage + next_query_time = query.expire_time_millis * (1 - next_percent_remaining) scheduled_ptr_query = _ScheduledPTRQuery( query.alias, query.name, query.expire_time_millis, next_query_time ) @@ -467,7 +469,7 @@ def _process_ready_types(self) -> None: # schedule a query again to try to recuse the record # from expiring. 
If the record is refreshed before # the query, the query will get cancelled. - # self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) + self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) ready_types.add(query.name) if ready_types: From 626ef58347ea0751ffb6673f7685a9185ddcbcf2 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:18:56 -1000 Subject: [PATCH 60/79] fix: remove unused code --- src/zeroconf/_services/browser.pxd | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index e26db223..1ea9a889 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -27,8 +27,6 @@ cdef object _FLAGS_QR_QUERY cdef object heappop, heappush -cdef double _first_refresh_time(double expire_time_millis) - cdef class _ScheduledPTRQuery: cdef public str alias From ccb53bfd2ae9acf1e05ae28e096e2c3e8b204ab5 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:19:57 -1000 Subject: [PATCH 61/79] fix: remove unused code --- src/zeroconf/_services/browser.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 1ea9a889..49dabdc0 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -114,4 +114,4 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef async_update_records_complete(self) - cpdef async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) + cpdef void async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) From 45bf3813f9b6638bc81d2cde643b71efda114094 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:22:03 -1000 Subject: [PATCH 62/79] fix: drop call if unused --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 00993f8f..b9ab52e8 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -380,7 +380,6 @@ def cancel_pointer_refresh(self, pointer: DNSPointer) -> None: def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" current = self._next_scheduled_for_alias.get(pointer.alias) - expire_time_millis = pointer.get_expiration_time(100) refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) if current is not None: # If the expire time is within self._min_time_between_queries_millis @@ -392,6 +391,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: ): return current.cancelled = True + expire_time_millis = pointer.get_expiration_time(100) self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) def schedule_next_refresh_query( From 024906ac5b72493d057f1d314839aa1f7cf06ec7 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 11:24:42 -1000 Subject: [PATCH 63/79] fix: naming --- src/zeroconf/_services/browser.pxd | 2 +- src/zeroconf/_services/browser.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 49dabdc0..b1386d32 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -79,7 +79,7 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule_pointer_first_refresh(self, DNSPointer pointer) - cpdef void schedule_next_refresh_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) + cpdef void schedule_rescue_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index b9ab52e8..db91ed63 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -394,7 +394,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: expire_time_millis = pointer.get_expiration_time(100) self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) - def schedule_next_refresh_query( + def schedule_rescue_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ ) -> None: """Reschedule a query for a pointer at an additional percentage of expiration.""" @@ -469,7 +469,7 @@ def _process_ready_types(self) -> None: # schedule a query again to try to recuse the record # from expiring. If the record is refreshed before # the query, the query will get cancelled. - self.schedule_next_refresh_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) + self.schedule_rescue_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) ready_types.add(query.name) if ready_types: From 18021eeb4b1debd3cd7d3d8d58c1fa678b6eaba0 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:47:45 -1000 Subject: [PATCH 64/79] chore: add units for _ScheduledPTRQuery dunder methods --- tests/services/test_browser.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 75ad3a51..302e8303 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -27,7 +27,7 @@ millis_to_seconds, ) from zeroconf._services import ServiceStateChange -from zeroconf._services.browser import ServiceBrowser +from zeroconf._services.browser import ServiceBrowser, _ScheduledPTRQuery from zeroconf._services.info import ServiceInfo from zeroconf.asyncio import AsyncServiceBrowser, AsyncZeroconf @@ -1201,3 +1201,33 @@ def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: browser.cancel() zc.close() + + +def test_scheduled_ptr_query_dunder_methods(): + query75 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 75) + query80 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 80) + query75_2 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 140, 75) + other = object() + stringified = str(query75) + assert "zoomy._hap._tcp.local." 
in stringified + assert "120" in stringified + assert "75" in stringified + assert "ScheduledPTRQuery" in stringified + + assert query75 == query75 + assert query75 != query80 + assert query75 == query75_2 + assert query75 < query80 + assert query75 <= query80 + assert query80 > query75 + assert query80 >= query75 + + assert query75 != other + with pytest.raises(TypeError): + query75 < other # type: ignore[operator] + with pytest.raises(TypeError): + query75 <= other + with pytest.raises(TypeError): + query75 > other # type: ignore[operator] + with pytest.raises(TypeError): + query75 >= other From 3c39ec650b1c8b9b2dd904cc6325534f2e1a6ab3 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:55:33 -1000 Subject: [PATCH 65/79] fix: tests --- src/zeroconf/_services/browser.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index db91ed63..cca95ec7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -142,29 +142,33 @@ def __repr__(self) -> str: def __lt__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" - return self.when_millis < other.when_millis + if type(other) is _ScheduledPTRQuery: + return self.when_millis < other.when_millis + return NotImplemented - def __le__(self, other: Any) -> bool: + def __le__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" - if isinstance(other, _ScheduledPTRQuery): + if type(other) is _ScheduledPTRQuery: return self.when_millis < other.when_millis or self.__eq__(other) return NotImplemented def __eq__(self, other: Any) -> bool: """Compare two scheduled queries.""" - if not isinstance(other, _ScheduledPTRQuery): + if type(other) is _ScheduledPTRQuery: return NotImplemented return self.when_millis == other.when_millis - def __ge__(self, other: Any) -> bool: + def __ge__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" - if isinstance(other, _ScheduledPTRQuery): + if type(other) is _ScheduledPTRQuery: return self.when_millis > other.when_millis or self.__eq__(other) return NotImplemented def __gt__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" - return self.when_millis > other.when_millis + if type(other) is _ScheduledPTRQuery: + return self.when_millis > other.when_millis + return NotImplemented class _DNSPointerOutgoingBucket: From a62901f19eb31d388fdce6205d08178aba728f63 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 11:55:59 -1000 Subject: [PATCH 66/79] fix: tests --- src/zeroconf/_services/browser.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index cca95ec7..7100b0d0 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -155,8 +155,8 @@ def __le__(self, other: '_ScheduledPTRQuery') -> bool: def __eq__(self, other: Any) -> bool: """Compare two scheduled queries.""" if type(other) is _ScheduledPTRQuery: - return NotImplemented - return self.when_millis == other.when_millis + return self.when_millis == other.when_millis + return NotImplemented def __ge__(self, other: '_ScheduledPTRQuery') -> bool: """Compare two scheduled queries.""" From e2d39da371face567b81acf8e62b90a839cf5e5d Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 12:12:19 -1000 Subject: [PATCH 67/79] feat: add stop sending tests --- tests/services/test_browser.py | 152 ++++++++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 2 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 302e8303..3aed0451 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -1226,8 +1226,156 @@ def test_scheduled_ptr_query_dunder_methods(): with pytest.raises(TypeError): query75 < other # type: ignore[operator] with pytest.raises(TypeError): - query75 <= other + query75 <= other # type: ignore[operator] with pytest.raises(TypeError): query75 > other # type: ignore[operator] with pytest.raises(TypeError): - query75 >= other + query75 >= other # type: ignore[operator] + + +@pytest.mark.asyncio +async def test_close_zeroconf_without_browser_before_start_up_queries(): + """Test that we stop sending startup queries if zeroconf is closed out from under the browser.""" + service_added = asyncio.Event() + type_ = "_http._tcp.local." + registration_name = "xxxyyy.%s" % type_ + + def on_service_state_change(zeroconf, service_type, state_change, name): + if name == registration_name: + if state_change is ServiceStateChange.Added: + service_added.set() + + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() + await zeroconf_browser.async_wait_for_start() + + sends: list[r.DNSIncoming] = [] + + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): + """Sends an outgoing packet.""" + pout = r.DNSIncoming(out.packets()[0]) + sends.append(pout) + + assert len(zeroconf_browser.engine.protocols) == 2 + + aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_registrar = aio_zeroconf_registrar.zeroconf + await aio_zeroconf_registrar.zeroconf.async_wait_for_start() + + assert len(zeroconf_registrar.engine.protocols) == 2 + # patch the zeroconf send so we can capture what is being sent + with patch.object(zeroconf_browser, "async_send", send): + service_added = asyncio.Event() + + browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) + info = ServiceInfo( + type_, + registration_name, + 80, + 0, + 0, + {'path': '/~paulsm/'}, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], + ) + task = await aio_zeroconf_registrar.async_register_service(info) + await task + loop = asyncio.get_running_loop() + try: + await asyncio.wait_for(service_added.wait(), 1) + assert service_added.is_set() + await aiozc.async_close() + sends.clear() + # Make sure the startup queries are sent + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + # We should not send any queries after close + assert not sends + finally: + await aio_zeroconf_registrar.async_close() + await browser.async_cancel() + + +@pytest.mark.asyncio +async def test_close_zeroconf_without_browser_after_start_up_queries(): + """Test that we stop sending rescue queries if zeroconf is closed out from under the browser.""" + service_added = asyncio.Event() + + type_ = "_http._tcp.local." 
+ registration_name = "xxxyyy.%s" % type_ + + def on_service_state_change(zeroconf, service_type, state_change, name): + if name == registration_name: + if state_change is ServiceStateChange.Added: + service_added.set() + + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() + await zeroconf_browser.async_wait_for_start() + + sends: list[r.DNSIncoming] = [] + + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): + """Sends an outgoing packet.""" + pout = r.DNSIncoming(out.packets()[0]) + sends.append(pout) + + assert len(zeroconf_browser.engine.protocols) == 2 + + aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_registrar = aio_zeroconf_registrar.zeroconf + await aio_zeroconf_registrar.zeroconf.async_wait_for_start() + + assert len(zeroconf_registrar.engine.protocols) == 2 + # patch the zeroconf send so we can capture what is being sent + with patch.object(zeroconf_browser, "async_send", send): + service_added = asyncio.Event() + browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) + expected_ttl = const._DNS_OTHER_TTL + info = ServiceInfo( + type_, + registration_name, + 80, + 0, + 0, + {'path': '/~paulsm/'}, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], + ) + task = await aio_zeroconf_registrar.async_register_service(info) + await task + loop = asyncio.get_running_loop() + try: + await asyncio.wait_for(service_added.wait(), 1) + assert service_added.is_set() + sends.clear() + # Make sure the startup queries are sent + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + # We should not send any queries after close + assert sends + + await aiozc.async_close() + sends.clear() + + now_millis = original_now * 1000 + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (expected_ttl * 1000) / 0.80 + time_changed_millis(now_millis) + + # We should not send the query after close + assert not sends + finally: + await aio_zeroconf_registrar.async_close() + await browser.async_cancel() From 3bb26ff1b69691dd23decd4ff14efd1c261356a4 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 13:02:02 -1000 Subject: [PATCH 68/79] fix: we need the ttl --- src/zeroconf/_services/browser.pxd | 1 + src/zeroconf/_services/browser.py | 34 +++++++++++++++------- tests/services/test_browser.py | 6 ++-- tests/test_asyncio.py | 46 +++++++++++++++++++++++++++--- 4 files changed, 70 insertions(+), 17 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index b1386d32..24a8faac 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -31,6 +31,7 @@ cdef class _ScheduledPTRQuery: cdef public str alias cdef public str name + cdef public unsigned int ttl cdef public bint cancelled cdef public double expire_time_millis cdef public double when_millis diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 7100b0d0..d71fa5a3 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -106,12 +106,15 @@ class _ScheduledPTRQuery: - __slots__ = ('alias', 'name', 'cancelled', 'expire_time_millis', 'when_millis') + __slots__ = ('alias', 'name', 'ttl', 'cancelled', 'expire_time_millis', 'when_millis') - def __init__(self, alias: str, name: str, expire_time_millis: float, when_millis: float) -> None: + def __init__( + self, alias: str, name: str, ttl: int, expire_time_millis: float, when_millis: float + ) -> None: """Create a scheduled query.""" self.alias = alias self.name = name + self.ttl = ttl # Since queries are stored in a heap we need to track if they are cancelled # so we can remove them from the heap when they are cancelled as it would # be too expensive to search the heap for the record to remove and instead @@ -134,6 +137,7 @@ def __repr__(self) -> str: f"<{self.__class__.__name__} " f"alias={self.alias} " f"name={self.name} " + f"ttl={self.ttl} " f"cancelled={self.cancelled} " f"expire_time_millis={self.expire_time_millis} " f"when_millis={self.when_millis}" @@ -365,8 +369,9 @@ def _schedule_pointer_refresh( self, pointer: DNSPointer, expire_time_millis: float_, refresh_time_millis: float_ ) -> None: """Schedule a query for a pointer.""" + ttl = int(pointer.ttl) if isinstance(pointer.ttl, float) else pointer.ttl scheduled_ptr_query = _ScheduledPTRQuery( - pointer.alias, pointer.name, expire_time_millis, refresh_time_millis + pointer.alias, pointer.name, ttl, expire_time_millis, refresh_time_millis ) self._schedule_ptr_query(scheduled_ptr_query) @@ -402,16 +407,16 @@ def schedule_rescue_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ ) -> None: """Reschedule a query for a pointer at an additional percentage of expiration.""" - percentage_remaining = (query.expire_time_millis - now_millis) / query.expire_time_millis - if percentage_remaining < additional_percentage: + ttl_millis = query.ttl * 1000 + additional_wait = ttl_millis * additional_percentage + next_query_time = now_millis + additional_wait + if next_query_time >= query.expire_time_millis: # If we would schedule past the expire time # there is no point in scheduling as we already # tried to rescue the record and failed return - next_percent_remaining = percentage_remaining - additional_percentage - next_query_time = query.expire_time_millis * (1 - next_percent_remaining) scheduled_ptr_query = _ScheduledPTRQuery( - query.alias, query.name, query.expire_time_millis, next_query_time + query.alias, query.name, query.ttl, query.expire_time_millis, next_query_time ) self._schedule_ptr_query(scheduled_ptr_query) @@ -435,7 
+440,10 @@ def _process_startup_queries(self) -> None: # switch to a strategy of sending queries only when we # need to refresh records that are about to expire if self._startup_queries_sent >= STARTUP_QUERIES: - self._process_ready_types() + self._next_run = self._loop.call_at( + millis_to_seconds(now_millis + self._min_time_between_queries_millis), + self._process_ready_types, + ) return self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_startup_queries) @@ -458,6 +466,7 @@ def _process_ready_types(self) -> None: ready_types: Set[str] = set() next_scheduled: Optional[_ScheduledPTRQuery] = None end_time_millis = now_millis + self._clock_resolution_millis + schedule_rescue: List[_ScheduledPTRQuery] = [] while self._query_heap: query = self._query_heap[0] @@ -467,14 +476,19 @@ def _process_ready_types(self) -> None: if query.when_millis > end_time_millis: next_scheduled = query break + + ready_types.add(query.name) + heappop(self._query_heap) del self._next_scheduled_for_alias[query.alias] # If there is still more than 10% of the TTL remaining # schedule a query again to try to recuse the record # from expiring. If the record is refreshed before # the query, the query will get cancelled. + schedule_rescue.append(query) + + for query in schedule_rescue: self.schedule_rescue_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) - ready_types.add(query.name) if ready_types: self._browser.async_send_ready_queries(False, now_millis, ready_types) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 3aed0451..09e96e5a 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -1204,9 +1204,9 @@ def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: def test_scheduled_ptr_query_dunder_methods(): - query75 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 75) - query80 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 80) - query75_2 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 140, 75) + query75 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 120, 75) + query80 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 120, 80) + query75_2 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 140, 75) other = object() stringified = str(query75) assert "zoomy._hap._tcp.local." 
in stringified diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 77b03756..748024b6 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -998,10 +998,12 @@ def on_service_state_change(zeroconf, service_type, state_change, name): expected_ttl = const._DNS_OTHER_TTL nbr_answers = 0 answers = [] + packets = [] def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" pout = DNSIncoming(out.packets()[0]) + packets.append(pout) last_answers = pout.answers() answers.append(last_answers) @@ -1046,21 +1048,29 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert service_added.is_set() # Make sure the startup queries are sent original_now = loop.time() - now_millis = original_now * 1000 + start_millis = original_now * 1000 + + now_millis = start_millis for query_count in range(_services_browser.STARTUP_QUERIES): now_millis += (2**query_count) * 1000 time_changed_millis(now_millis) got_query.clear() - now_millis = original_now * 1000 assert not unexpected_ttl.is_set() + + assert len(packets) == _services_browser.STARTUP_QUERIES + packets.clear() + + # Wait for the first refresh query # Move time forward past when the TTL is no longer - # fresh (AKA 75% of the TTL) - now_millis += (expected_ttl * 1000) / 0.80 + # fresh (AKA ~75% of the TTL) + now_millis = start_millis + ((expected_ttl * 1000) * 0.76) time_changed_millis(now_millis) await asyncio.wait_for(got_query.wait(), 1) assert not unexpected_ttl.is_set() + assert len(packets) == 1 + packets.clear() assert len(answers) == _services_browser.STARTUP_QUERIES + 1 # The first question should have no known answers @@ -1071,6 +1081,34 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(answer_list) == 1 # Once the TTL is reached, the last question should have no known answers assert len(answers[-1]) == 0 + + got_query.clear() + packets.clear() + # Move time forward past when the TTL is no longer + # fresh (AKA 85% of the TTL) to ensure we try + # to recuse the record + now_millis = start_millis + ((expected_ttl * 1000) * 0.87) + time_changed_millis(now_millis) + + await asyncio.wait_for(got_query.wait(), 1) + assert len(packets) == 1 + assert not unexpected_ttl.is_set() + + packets.clear() + got_query.clear() + # Move time forward past when the TTL is no longer + # fresh (AKA 95% of the TTL). At this point + # nothing should get scheduled rescued because the rescue + # would exceed the TTL + now_millis = start_millis + ((expected_ttl * 1000) * 0.98) + + # Verify we don't send a query for a record that is + # past the TTL as we should not try to rescue it + # once its past the TTL + time_changed_millis(now_millis) + await asyncio.wait_for(got_query.wait(), 1) + assert len(packets) == 1 + # Don't remove service, allow close() to cleanup finally: await aio_zeroconf_registrar.async_close() From 8686dea69204cebd86cc5ed1987b3fa1ab15c352 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 13:13:27 -1000 Subject: [PATCH 69/79] chore: update comments --- src/zeroconf/_services/browser.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index d71fa5a3..2edf62fc 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -460,6 +460,7 @@ def _process_ready_types(self) -> None: now_millis = current_time_millis() # Refresh records that are about to expire (aka # _EXPIRE_REFRESH_TIME_PERCENT which is currently 75% of the TTL) and + # additional rescue queries if the 75% query failed to refresh the record # with a minimum time between queries of _min_time_between_queries # which defaults to 10s From 822a009e5488d1dfc5960bd9cba958630c4a1551 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 13:14:47 -1000 Subject: [PATCH 70/79] fix: consistant naming --- src/zeroconf/_services/browser.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2edf62fc..a1a036f6 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -359,13 +359,13 @@ def stop(self) -> None: self._next_scheduled_for_alias.clear() self._query_heap.clear() - def schedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: + def schedule_ptr_first_refresh(self, pointer: DNSPointer) -> None: """Schedule a query for a pointer.""" expire_time_millis = pointer.get_expiration_time(100) refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) - self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) + self._schedule_ptr_refresh(pointer, expire_time_millis, refresh_time_millis) - def _schedule_pointer_refresh( + def _schedule_ptr_refresh( self, pointer: DNSPointer, expire_time_millis: float_, refresh_time_millis: float_ ) -> None: """Schedule a query for a pointer.""" @@ -380,13 +380,13 @@ def _schedule_ptr_query(self, scheduled_query: _ScheduledPTRQuery) -> None: self._next_scheduled_for_alias[scheduled_query.alias] = scheduled_query heappush(self._query_heap, scheduled_query) - def cancel_pointer_refresh(self, pointer: DNSPointer) -> None: + def cancel_ptr_refresh(self, pointer: DNSPointer) -> None: """Cancel a query for a pointer.""" scheduled = self._next_scheduled_for_alias.pop(pointer.alias, None) if scheduled: scheduled.cancelled = True - def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: + def reschedule_ptr_first_refresh(self, pointer: DNSPointer) -> None: """Reschedule a query for a pointer.""" current = self._next_scheduled_for_alias.get(pointer.alias) refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT) @@ -401,7 +401,7 @@ def reschedule_pointer_first_refresh(self, pointer: DNSPointer) -> None: return current.cancelled = True expire_time_millis = pointer.get_expiration_time(100) - self._schedule_pointer_refresh(pointer, expire_time_millis, refresh_time_millis) + self._schedule_ptr_refresh(pointer, expire_time_millis, refresh_time_millis) def schedule_rescue_query( self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ @@ -644,12 +644,12 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record for type_ in self.types.intersection(cached_possible_types(pointer.name)): if old_record is None: self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias) - 
self.query_scheduler.schedule_pointer_first_refresh(pointer) + self.query_scheduler.schedule_ptr_first_refresh(pointer) elif pointer.is_expired(now): self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias) - self.query_scheduler.cancel_pointer_refresh(pointer) + self.query_scheduler.cancel_ptr_refresh(pointer) else: - self.query_scheduler.reschedule_pointer_first_refresh(pointer) + self.query_scheduler.reschedule_ptr_first_refresh(pointer) continue # If its expired or already exists in the cache it cannot be updated. From 7208da695b76b31d1ea2502466b0232c529557f9 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 13:24:30 -1000 Subject: [PATCH 71/79] chore: move async_send_ready_queries out of the browser itself --- src/zeroconf/_services/browser.pxd | 11 +++-- src/zeroconf/_services/browser.py | 70 ++++++++++++++++++++---------- 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 24a8faac..4a82d43e 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -58,7 +58,11 @@ cdef list _group_ptr_queries_with_known_answers(double now_millis, bint multicas cdef class QueryScheduler: - cdef _ServiceBrowserBase _browser + cdef object _zc + cdef set _types + cdef str _addr + cdef int _port + cdef bint _multicast cdef tuple _first_random_delay_interval cdef double _min_time_between_queries_millis cdef object _loop @@ -67,6 +71,7 @@ cdef class QueryScheduler: cdef public list _query_heap cdef object _next_run cdef double _clock_resolution_millis + cdef object _question_type cpdef void schedule_pointer_first_refresh(self, DNSPointer pointer) @@ -87,6 +92,8 @@ cdef class QueryScheduler: @cython.locals(query=_ScheduledPTRQuery, next_scheduled=_ScheduledPTRQuery, next_when=double) cpdef void _process_ready_types(self) + cpdef void async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) + cdef class _ServiceBrowserBase(RecordUpdateListener): cdef public cython.set types @@ -114,5 +121,3 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cpdef _fire_service_state_changed_event(self, cython.tuple event) cpdef async_update_records_complete(self) - - cpdef void async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index a1a036f6..5221c9c7 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -310,7 +310,11 @@ class QueryScheduler: """ __slots__ = ( - '_browser', + '_zc', + '_types', + '_addr', + '_port', + '_multicast', '_first_random_delay_interval', '_min_time_between_queries_millis', '_loop', @@ -319,15 +323,25 @@ class QueryScheduler: '_query_heap', '_next_run', '_clock_resolution_millis', + '_question_type', ) def __init__( self, - browser: "_ServiceBrowserBase", + zc: Zeroconf, + types: Set[str], + addr: Optional[str], + port: int, + multicast: bool, delay: int, first_random_delay_interval: Tuple[int, int], + question_type: Optional[DNSQuestionType], ) -> None: - self._browser = browser + self._zc = zc + self._types = types + self._addr = addr + self._port = port + self._multicast = multicast self._first_random_delay_interval = first_random_delay_interval self._min_time_between_queries_millis = delay self._loop: Optional[asyncio.AbstractEventLoop] = None @@ -336,6 +350,7 @@ def __init__( self._query_heap: list[_ScheduledPTRQuery] = [] self._next_run: 
Optional[asyncio.TimerHandle] = None self._clock_resolution_millis = time.get_clock_info('monotonic').resolution * 1000 + self._question_type = question_type def start(self, loop: asyncio.AbstractEventLoop) -> None: """Start the scheduler. @@ -425,15 +440,13 @@ def _process_startup_queries(self) -> None: assert self._loop is not None # This is a safety to ensure we stop sending queries if Zeroconf instance # is stopped without the browser being cancelled - if self._browser.zc.done: + if self._zc.done: return now_millis = current_time_millis() # At first we will send STARTUP_QUERIES queries to get the cache populated - self._browser.async_send_ready_queries( - self._startup_queries_sent == 0, now_millis, self._browser.types - ) + self.async_send_ready_queries(self._startup_queries_sent == 0, now_millis, self._types) self._startup_queries_sent += 1 # Once we finish sending the initial queries we will @@ -454,7 +467,7 @@ def _process_ready_types(self) -> None: assert self._loop is not None # This is a safety to ensure we stop sending queries if Zeroconf instance # is stopped without the browser being cancelled - if self._browser.zc.done: + if self._zc.done: return now_millis = current_time_millis() @@ -492,7 +505,7 @@ def _process_ready_types(self) -> None: self.schedule_rescue_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE) if ready_types: - self._browser.async_send_ready_queries(False, now_millis, ready_types) + self.async_send_ready_queries(False, now_millis, ready_types) next_time_millis = now_millis + self._min_time_between_queries_millis @@ -503,6 +516,20 @@ def _process_ready_types(self) -> None: self._next_run = self._loop.call_at(millis_to_seconds(next_when_millis), self._process_ready_types) + def async_send_ready_queries( + self, first_request: bool, now_millis: float_, ready_types: Set[str] + ) -> None: + """Send any ready queries.""" + # If they did not specify and this is the first request, ask QU questions + # https://datatracker.ietf.org/doc/html/rfc6762#section-5.4 since we are + # just starting up and we know our cache is likely empty. This ensures + # the next outgoing will be sent with the known answers list. 
+ question_type = QU_QUESTION if self._question_type is None and first_request else self._question_type + outs = generate_service_query(self._zc, now_millis, ready_types, self._multicast, question_type) + if outs: + for out in outs: + self._zc.async_send(out, self._addr, self._port) + class _ServiceBrowserBase(RecordUpdateListener): """Base class for ServiceBrowser.""" @@ -567,7 +594,16 @@ def __init__( self.question_type = question_type self._pending_handlers: Dict[Tuple[str, str], ServiceStateChange] = {} self._service_state_changed = Signal() - self.query_scheduler = QueryScheduler(self, delay, _FIRST_QUERY_DELAY_RANDOM_INTERVAL) + self.query_scheduler = QueryScheduler( + zc, + self.types, + addr, + port, + self.multicast, + delay, + _FIRST_QUERY_DELAY_RANDOM_INTERVAL, + question_type, + ) self.done = False self._next_send_timer: Optional[asyncio.TimerHandle] = None self._query_sender_task: Optional[asyncio.Task] = None @@ -711,20 +747,6 @@ async def _async_start_query_sender(self) -> None: await self.zc.async_wait_for_start() self.query_scheduler.start(self._loop) - def async_send_ready_queries( - self, first_request: bool, now_millis: float_, ready_types: Set[str] - ) -> None: - """Send any ready queries.""" - # If they did not specify and this is the first request, ask QU questions - # https://datatracker.ietf.org/doc/html/rfc6762#section-5.4 since we are - # just starting up and we know our cache is likely empty. This ensures - # the next outgoing will be sent with the known answers list. - question_type = QU_QUESTION if self.question_type is None and first_request else self.question_type - outs = generate_service_query(self.zc, now_millis, ready_types, self.multicast, question_type) - if outs: - for out in outs: - self.zc.async_send(out, self.addr, self.port) - class ServiceBrowser(_ServiceBrowserBase, threading.Thread): """Used to browse for a service of a specific type. From 30259f2ca6a4abf726335069ced5771ad4e0ea17 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 13:28:25 -1000 Subject: [PATCH 72/79] fix: remove object vars that are moved to the query scheduler --- src/zeroconf/_services/browser.pxd | 5 ----- src/zeroconf/_services/browser.py | 13 ++----------- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 4a82d43e..c2590cfc 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -100,15 +100,10 @@ cdef class _ServiceBrowserBase(RecordUpdateListener): cdef public object zc cdef DNSCache _cache cdef object _loop - cdef public object addr - cdef public object port - cdef public bint multicast - cdef public object question_type cdef public cython.dict _pending_handlers cdef public object _service_state_changed cdef public QueryScheduler query_scheduler cdef public bint done - cdef public object _next_send_timer cdef public object _query_sender_task cpdef _enqueue_callback(self, object state_change, object type_, object name) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 5221c9c7..8eba34e0 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -539,15 +539,10 @@ class _ServiceBrowserBase(RecordUpdateListener): 'zc', '_cache', '_loop', - 'addr', - 'port', - 'multicast', - 'question_type', '_pending_handlers', '_service_state_changed', 'query_scheduler', 'done', - '_next_send_timer', '_query_sender_task', ) @@ -588,10 +583,6 @@ def __init__( self._cache = zc.cache assert zc.loop is not None self._loop = zc.loop - self.addr = addr - self.port = port - self.multicast = self.addr in (None, _MDNS_ADDR, _MDNS_ADDR6) - self.question_type = question_type self._pending_handlers: Dict[Tuple[str, str], ServiceStateChange] = {} self._service_state_changed = Signal() self.query_scheduler = QueryScheduler( @@ -599,13 +590,12 @@ def __init__( self.types, addr, port, - self.multicast, + addr in (None, _MDNS_ADDR, _MDNS_ADDR6), delay, _FIRST_QUERY_DELAY_RANDOM_INTERVAL, question_type, ) self.done = False - self._next_send_timer: Optional[asyncio.TimerHandle] = None self._query_sender_task: Optional[asyncio.Task] = None if hasattr(handlers, 'add_service'): @@ -740,6 +730,7 @@ def _async_cancel(self) -> None: self.zc.async_remove_listener(self) assert self._query_sender_task is not None, "Attempted to cancel a browser that was not started" self._query_sender_task.cancel() + self._query_sender_task = None async def _async_start_query_sender(self) -> None: """Start scheduling queries.""" From 3b7e0d52680fbffc30a593fe91ed739b88ea5635 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 13:31:02 -1000 Subject: [PATCH 73/79] fix: remove object vars that are moved to the query scheduler --- src/zeroconf/_services/browser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 8eba34e0..c6c661e6 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -328,7 +328,7 @@ class QueryScheduler: def __init__( self, - zc: Zeroconf, + zc: "Zeroconf", types: Set[str], addr: Optional[str], port: int, From be0fe641215eb98c96e50820590cddc71524c9e1 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 13:33:40 -1000 Subject: [PATCH 74/79] fix: cythonize --- src/zeroconf/_services/browser.pxd | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index c2590cfc..14550e64 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -73,17 +73,17 @@ cdef class QueryScheduler: cdef double _clock_resolution_millis cdef object _question_type - cpdef void schedule_pointer_first_refresh(self, DNSPointer pointer) + cpdef void schedule_ptr_first_refresh(self, DNSPointer pointer) - cdef void _schedule_pointer_refresh(self, DNSPointer pointer, double expire_time_millis, double refresh_time_millis) + cdef void _schedule_ptr_refresh(self, DNSPointer pointer, double expire_time_millis, double refresh_time_millis) cdef void _schedule_ptr_query(self, _ScheduledPTRQuery scheduled_query) @cython.locals(scheduled=_ScheduledPTRQuery) - cpdef void cancel_pointer_refresh(self, DNSPointer pointer) + cpdef void cancel_ptr_refresh(self, DNSPointer pointer) @cython.locals(current=_ScheduledPTRQuery, expire_time=double) - cpdef void reschedule_pointer_first_refresh(self, DNSPointer pointer) + cpdef void reschedule_ptr_first_refresh(self, DNSPointer pointer) cpdef void schedule_rescue_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) From 8bb748650017ddc0170a7b1050e7464f2f31a464 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 14:03:53 -1000 Subject: [PATCH 75/79] chore: add more types --- src/zeroconf/_services/browser.pxd | 1 + 1 file changed, 1 insertion(+) diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 52dcbab5..88a5321d 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -85,6 +85,7 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule_ptr_first_refresh(self, DNSPointer pointer) + @cython.locals(ttl_millis='unsigned int', additional_wait=double, next_query_time=double) cpdef void schedule_rescue_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) From ae0665f51ec7f1ba6bf47979e4b6f3aa92f9f521 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 14:39:46 -1000 Subject: [PATCH 76/79] fix: tests --- tests/services/test_browser.py | 203 +++++++++++++++++++++++++-------- 1 file changed, 153 insertions(+), 50 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 09e96e5a..beb36b3e 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -54,6 +54,13 @@ def teardown_module(): log.setLevel(original_logging_level) +def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: + generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) + for record in records: + generated.add_answer_at_time(record, 0) + return r.DNSIncoming(generated.packets()[0]) + + def test_service_browser_cancel_multiple_times(): """Test we can cancel a ServiceBrowser multiple times before close.""" @@ -214,7 +221,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de assert service_info.server.lower() == service_server.lower() service_updated_event.set() - def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming: + def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming: generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) assert generated.is_response() is True @@ -292,7 +299,7 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi wait_time = 3 # service added - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Added)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Added)) service_add_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 0 @@ -301,7 +308,7 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi # service SRV updated service_updated_event.clear() service_server = 'ash-2.local.' 
- _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 1 @@ -310,7 +317,7 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi # service TXT updated service_updated_event.clear() service_text = b'path=/~matt2/' - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 2 @@ -319,7 +326,7 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi # service TXT updated - duplicate update should not trigger another service_updated service_updated_event.clear() service_text = b'path=/~matt2/' - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 2 @@ -330,7 +337,7 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi service_address = '10.0.1.3' # Verify we match on uppercase service_server = service_server.upper() - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 3 @@ -341,14 +348,14 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi service_server = 'ash-3.local.' 
service_text = b'path=/~matt3/' service_address = '10.0.1.3' - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 4 assert service_removed_count == 0 # service removed - _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Removed)) + _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Removed)) service_removed_event.wait(wait_time) assert service_added_count == 1 assert service_updated_count == 4 @@ -386,7 +393,7 @@ def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de if service_removed_count == 3: service_removed_event.set() - def mock_incoming_msg( + def mock_record_update_incoming_msg( service_state_change: r.ServiceStateChange, service_type: str, service_name: str, ttl: int ) -> r.DNSIncoming: generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) @@ -404,11 +411,15 @@ def mock_incoming_msg( # all three services added _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Added, service_types[0], service_names[0], 120), + mock_record_update_incoming_msg( + r.ServiceStateChange.Added, service_types[0], service_names[0], 120 + ), ) _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Added, service_types[1], service_names[1], 120), + mock_record_update_incoming_msg( + r.ServiceStateChange.Added, service_types[1], service_names[1], 120 + ), ) time.sleep(0.1) @@ -425,14 +436,18 @@ def _mock_get_expiration_time(self, percent): with patch("zeroconf.DNSRecord.get_expiration_time", new=_mock_get_expiration_time): _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Added, service_types[0], service_names[0], 120), + mock_record_update_incoming_msg( + r.ServiceStateChange.Added, service_types[0], service_names[0], 120 + ), ) # Add the last record after updating the first one # to ensure the service_add_event only gets set # after the update _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Added, service_types[2], service_names[2], 120), + mock_record_update_incoming_msg( + r.ServiceStateChange.Added, service_types[2], service_names[2], 120 + ), ) service_add_event.wait(wait_time) assert called_with_refresh_time_check is True @@ -441,21 +456,29 @@ def _mock_get_expiration_time(self, percent): _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Updated, service_types[0], service_names[0], 0), + mock_record_update_incoming_msg( + r.ServiceStateChange.Updated, service_types[0], service_names[0], 0 + ), ) # all three services removed _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Removed, service_types[0], service_names[0], 0), + mock_record_update_incoming_msg( + r.ServiceStateChange.Removed, service_types[0], service_names[0], 0 + ), ) _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Removed, service_types[1], service_names[1], 0), + mock_record_update_incoming_msg( + r.ServiceStateChange.Removed, service_types[1], service_names[1], 0 + ), ) _inject_response( zeroconf, - mock_incoming_msg(r.ServiceStateChange.Removed, service_types[2], service_names[2], 0), + mock_record_update_incoming_msg( + r.ServiceStateChange.Removed, service_types[2], service_names[2], 0 + ), ) service_removed_event.wait(wait_time) assert service_added_count == 3 @@ -589,7 +612,7 @@ def send(out, addr=const._MDNS_ADDR, 
port=const._MDNS_PORT, v6_flow_scope=()): assert not unexpected_ttl.is_set() # Move time forward past when the TTL is no longer # fresh (AKA 75% of the TTL) - now_millis += (expected_ttl * 1000) / 0.80 + now_millis += (expected_ttl * 1000) * 0.80 time_changed_millis(now_millis) await asyncio.wait_for(got_query.wait(), 1) @@ -614,6 +637,116 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): await aiozc.async_close() +@pytest.mark.asyncio +async def test_ttl_refresh_cancelled_rescue_query(): + """Verify seeing a name again cancels the rescue query.""" + service_added = asyncio.Event() + service_removed = asyncio.Event() + unexpected_ttl = asyncio.Event() + got_query = asyncio.Event() + + type_ = "_http._tcp.local." + registration_name = "xxxyyy.%s" % type_ + + def on_service_state_change(zeroconf, service_type, state_change, name): + if name == registration_name: + if state_change is ServiceStateChange.Added: + service_added.set() + elif state_change is ServiceStateChange.Removed: + service_removed.set() + + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() + await zeroconf_browser.async_wait_for_start() + + # we are going to patch the zeroconf send to check packet sizes + old_send = zeroconf_browser.async_send + + expected_ttl = const._DNS_OTHER_TTL + packets = [] + + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): + """Sends an outgoing packet.""" + pout = r.DNSIncoming(out.packets()[0]) + packets.append(pout) + got_query.set() + old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope) + + assert len(zeroconf_browser.engine.protocols) == 2 + + aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_registrar = aio_zeroconf_registrar.zeroconf + await aio_zeroconf_registrar.zeroconf.async_wait_for_start() + + assert len(zeroconf_registrar.engine.protocols) == 2 + # patch the zeroconf send so we can capture what is being sent + with patch.object(zeroconf_browser, "async_send", send): + service_added = asyncio.Event() + service_removed = asyncio.Event() + + browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change]) + info = ServiceInfo( + type_, + registration_name, + 80, + 0, + 0, + {'path': '/~paulsm/'}, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], + ) + task = await aio_zeroconf_registrar.async_register_service(info) + await task + loop = asyncio.get_running_loop() + try: + await asyncio.wait_for(service_added.wait(), 1) + assert service_added.is_set() + # Make sure the startup queries are sent + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + now_millis = original_now * 1000 + assert not unexpected_ttl.is_set() + await asyncio.wait_for(got_query.wait(), 1) + got_query.clear() + assert len(packets) == _services_browser.STARTUP_QUERIES + packets.clear() + + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (expected_ttl * 1000) * 0.80 + # Inject a response that will reschedule + # the rescue query so it does not happen + with patch("time.monotonic", return_value=now_millis / 1000): + zeroconf_browser.record_manager.async_updates_from_response( + mock_incoming_msg([info.dns_pointer()]), + ) + + time_changed_millis(now_millis) + await asyncio.sleep(0) + + # 
Verify we did not send a rescue query + assert not packets + + # We should still get a recuse query once the rescheduled + # query time is reached + now_millis += (expected_ttl * 1000) * 0.76 + time_changed_millis(now_millis) + await asyncio.wait_for(got_query.wait(), 1) + assert len(packets) == 1 + # Don't remove service, allow close() to cleanup + finally: + await aio_zeroconf_registrar.async_close() + await asyncio.wait_for(service_removed.wait(), 1) + assert service_removed.is_set() + await browser.async_cancel() + await aiozc.async_close() + + def test_asking_qm_questions(): """Verify explictly asking QM questions.""" type_ = "_quservice._tcp.local." @@ -767,12 +900,6 @@ def on_service_state_change(zeroconf, service_type, state_change, name): address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) - def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: - generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for record in records: - generated.add_answer_at_time(record, 0) - return r.DNSIncoming(generated.packets()[0]) - _inject_response( zc, mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), @@ -840,12 +967,6 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) - def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: - generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for record in records: - generated.add_answer_at_time(record, 0) - return r.DNSIncoming(generated.packets()[0]) - _inject_response( zc, mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), @@ -899,12 +1020,6 @@ def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) - def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: - generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for record in records: - generated.add_answer_at_time(record, 0) - return r.DNSIncoming(generated.packets()[0]) - _inject_response( zc, mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), @@ -927,7 +1042,7 @@ def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: zc.close() -def test_servicebrowser_uses_non_strict_names(): +def test_service_browser_uses_non_strict_names(): """Verify we can look for technically invalid names as we cannot change what others do.""" # dummy service callback @@ -1062,12 +1177,6 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de not_match_type_, not_match_registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address] ) - def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming: - generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for record in records: - generated.add_answer_at_time(record, 0) - return r.DNSIncoming(generated.packets()[0]) - _inject_response( zc, mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), @@ -1153,12 +1262,6 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de addresses=[address], ) - def mock_incoming_msg(records: 
Iterable[r.DNSRecord]) -> r.DNSIncoming: - generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for record in records: - generated.add_answer_at_time(record, 0) - return r.DNSIncoming(generated.packets()[0]) - _inject_response( zc, mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), @@ -1371,7 +1474,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): now_millis = original_now * 1000 # Move time forward past when the TTL is no longer # fresh (AKA 75% of the TTL) - now_millis += (expected_ttl * 1000) / 0.80 + now_millis += (expected_ttl * 1000) * 0.80 time_changed_millis(now_millis) # We should not send the query after close From d65e1e4cdbd430b0e5a469ea2dd6a0e69284d8f5 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 14:44:10 -1000 Subject: [PATCH 77/79] chore: fix comments --- src/zeroconf/_services/browser.py | 2 +- tests/services/test_browser.py | 2 +- tests/test_asyncio.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index c6c661e6..4d7646a2 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -496,7 +496,7 @@ def _process_ready_types(self) -> None: heappop(self._query_heap) del self._next_scheduled_for_alias[query.alias] # If there is still more than 10% of the TTL remaining - # schedule a query again to try to recuse the record + # schedule a query again to try to rescue the record # from expiring. If the record is refreshed before # the query, the query will get cancelled. schedule_rescue.append(query) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index beb36b3e..9df0387b 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -732,7 +732,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): # Verify we did not send a rescue query assert not packets - # We should still get a recuse query once the rescheduled + # We should still get a rescue query once the rescheduled # query time is reached now_millis += (expected_ttl * 1000) * 0.76 time_changed_millis(now_millis) diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 748024b6..d4594788 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1086,7 +1086,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): packets.clear() # Move time forward past when the TTL is no longer # fresh (AKA 85% of the TTL) to ensure we try - # to recuse the record + # to rescue the record now_millis = start_millis + ((expected_ttl * 1000) * 0.87) time_changed_millis(now_millis) From 81073ed887cf527ce8e2bf4145d8f98b60872449 Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Fri, 15 Dec 2023 14:51:26 -1000 Subject: [PATCH 78/79] chore: fix tests --- tests/services/test_browser.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 9df0387b..38a7f63b 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -747,11 +747,13 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): await aiozc.async_close() -def test_asking_qm_questions(): +@pytest.mark.asyncio +async def test_asking_qm_questions(): """Verify explictly asking QM questions.""" type_ = "_quservice._tcp.local." 
- zeroconf_browser = Zeroconf(interfaces=['127.0.0.1']) - + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + await zeroconf_browser.async_wait_for_start() # we are going to patch the zeroconf send to check query transmission old_send = zeroconf_browser.async_send @@ -770,21 +772,24 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): def on_service_state_change(zeroconf, service_type, state_change, name): pass - browser = ServiceBrowser( + browser = AsyncServiceBrowser( zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QM ) - time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) + await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) try: assert first_outgoing.questions[0].unicast is False # type: ignore[union-attr] finally: - browser.cancel() - zeroconf_browser.close() + await browser.async_cancel() + await aiozc.async_close() -def test_asking_qu_questions(): +@pytest.mark.asyncio +async def test_asking_qu_questions(): """Verify the service browser can ask QU questions.""" type_ = "_quservice._tcp.local." - zeroconf_browser = Zeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = aiozc.zeroconf + await zeroconf_browser.async_wait_for_start() # we are going to patch the zeroconf send to check query transmission old_send = zeroconf_browser.async_send @@ -804,15 +809,15 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): def on_service_state_change(zeroconf, service_type, state_change, name): pass - browser = ServiceBrowser( + browser = AsyncServiceBrowser( zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QU ) - time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) + await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) try: assert first_outgoing.questions[0].unicast is True # type: ignore[union-attr] finally: - browser.cancel() - zeroconf_browser.close() + await browser.async_cancel() + await aiozc.async_close() def test_legacy_record_update_listener(): From 425de23468501483d1049cff9fc24f0f749b09f3 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 15 Dec 2023 16:35:07 -1000 Subject: [PATCH 79/79] chore: unit tests for query scheduler --- tests/services/test_browser.py | 162 +++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 38a7f63b..6a3bd398 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -1142,6 +1142,168 @@ async def test_generate_service_query_suppress_duplicate_questions(): await aiozc.async_close() +@pytest.mark.asyncio +async def test_query_scheduler(): + delay = const._BROWSER_TIME + types_ = {"_hap._tcp.local.", "_http._tcp.local."} + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + await aiozc.zeroconf.async_wait_for_start() + zc = aiozc.zeroconf + sends: List[r.DNSIncoming] = [] + + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): + """Sends an outgoing packet.""" + pout = r.DNSIncoming(out.packets()[0]) + sends.append(pout) + + query_scheduler = _services_browser.QueryScheduler(zc, types_, None, 0, True, delay, (0, 0), None) + loop = asyncio.get_running_loop() + + # patch the zeroconf send so we can capture what is being sent + with patch.object(zc, "async_send", send): + + query_scheduler.start(loop) + + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + ptr_record = r.DNSPointer( + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "zoomer._hap._tcp.local.", + ) + ptr2_record = r.DNSPointer( + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "disappear._hap._tcp.local.", + ) + + query_scheduler.schedule_ptr_first_refresh(ptr_record) + expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT) + expected_expire_time = ptr_record.get_expiration_time(100) + ptr_query = _ScheduledPTRQuery( + ptr_record.alias, ptr_record.name, int(ptr_record.ttl), expected_expire_time, expected_when_time + ) + assert query_scheduler._query_heap == [ptr_query] + + query_scheduler.schedule_ptr_first_refresh(ptr2_record) + expected_when_time = ptr2_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT) + expected_expire_time = ptr2_record.get_expiration_time(100) + ptr2_query = _ScheduledPTRQuery( + ptr2_record.alias, + ptr2_record.name, + int(ptr2_record.ttl), + expected_expire_time, + expected_when_time, + ) + + assert query_scheduler._query_heap == [ptr_query, ptr2_query] + + # Simulate PTR one goodbye + + query_scheduler.cancel_ptr_refresh(ptr_record) + ptr_query.cancelled = True + + assert query_scheduler._query_heap == [ptr_query, ptr2_query] + assert query_scheduler._query_heap[0].cancelled is True + assert query_scheduler._query_heap[1].cancelled is False + + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (ptr2_record.ttl * 1000) * 0.80 + time_changed_millis(now_millis) + assert len(query_scheduler._query_heap) == 1 + first_heap = query_scheduler._query_heap[0] + assert first_heap.cancelled is False + assert first_heap.alias == ptr2_record.alias + + # Move time forward past when the record expires + now_millis += (ptr2_record.ttl * 1000) * 0.20 + time_changed_millis(now_millis) + assert len(query_scheduler._query_heap) == 0 + + await aiozc.async_close() + + +@pytest.mark.asyncio +async def test_query_scheduler_rescue_records(): + delay = 
const._BROWSER_TIME + types_ = {"_hap._tcp.local.", "_http._tcp.local."} + aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + await aiozc.zeroconf.async_wait_for_start() + zc = aiozc.zeroconf + sends: List[r.DNSIncoming] = [] + + def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): + """Sends an outgoing packet.""" + pout = r.DNSIncoming(out.packets()[0]) + sends.append(pout) + + query_scheduler = _services_browser.QueryScheduler(zc, types_, None, 0, True, delay, (0, 0), None) + loop = asyncio.get_running_loop() + + # patch the zeroconf send so we can capture what is being sent + with patch.object(zc, "async_send", send): + + query_scheduler.start(loop) + + original_now = loop.time() + now_millis = original_now * 1000 + for query_count in range(_services_browser.STARTUP_QUERIES): + now_millis += (2**query_count) * 1000 + time_changed_millis(now_millis) + + ptr_record = r.DNSPointer( + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "zoomer._hap._tcp.local.", + ) + + query_scheduler.schedule_ptr_first_refresh(ptr_record) + expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT) + expected_expire_time = ptr_record.get_expiration_time(100) + ptr_query = _ScheduledPTRQuery( + ptr_record.alias, ptr_record.name, int(ptr_record.ttl), expected_expire_time, expected_when_time + ) + assert query_scheduler._query_heap == [ptr_query] + assert query_scheduler._query_heap[0].cancelled is False + + # Move time forward past when the TTL is no longer + # fresh (AKA 75% of the TTL) + now_millis += (ptr_record.ttl * 1000) * 0.76 + time_changed_millis(now_millis) + assert len(query_scheduler._query_heap) == 1 + new_when = query_scheduler._query_heap[0].when_millis + assert query_scheduler._query_heap[0].cancelled is False + assert new_when >= expected_when_time + + # Move time forward again, but not enough to expire the + # record to make sure we try to rescue it + now_millis += (ptr_record.ttl * 1000) * 0.11 + time_changed_millis(now_millis) + assert len(query_scheduler._query_heap) == 1 + second_new_when = query_scheduler._query_heap[0].when_millis + assert query_scheduler._query_heap[0].cancelled is False + assert second_new_when >= new_when + + # Move time forward again, enough that we will no longer + # try to rescue the record + now_millis += (ptr_record.ttl * 1000) * 0.11 + time_changed_millis(now_millis) + assert len(query_scheduler._query_heap) == 0 + + await aiozc.async_close() + + def test_service_browser_matching(): """Test that the ServiceBrowser matching does not match partial names."""
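
Note on the scheduling pattern: the query scheduler unit tests in PATCH 79 above exercise the heap-based refresh scheduling these patches introduce. Each discovered PTR record gets a refresh query scheduled at roughly 75% of its TTL, cancellation only marks the existing heap entry (lazy cancellation), and cancelled or stale entries are skipped when the heap is drained. The following is a rough, self-contained sketch of that idea only, not the zeroconf implementation; TinyScheduler, ScheduledQuery, and their field names are invented for illustration, in the same heappush/heappop spirit visible in the diffs above.

import heapq
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass(order=True)
class ScheduledQuery:
    # Only when_millis participates in ordering, so the heap is a min-heap by due time.
    when_millis: float
    alias: str = field(compare=False)
    cancelled: bool = field(default=False, compare=False)


class TinyScheduler:
    """Illustrative min-heap scheduler with lazy cancellation (not the real QueryScheduler)."""

    def __init__(self) -> None:
        self._heap: List[ScheduledQuery] = []
        self._by_alias: Dict[str, ScheduledQuery] = {}

    def schedule(self, alias: str, when_millis: float) -> None:
        # Push a new entry and remember it by alias so it can be cancelled later.
        query = ScheduledQuery(when_millis, alias)
        self._by_alias[alias] = query
        heapq.heappush(self._heap, query)

    def cancel(self, alias: str) -> None:
        # Mark the entry cancelled instead of removing it from the heap (lazy cancellation).
        query = self._by_alias.pop(alias, None)
        if query is not None:
            query.cancelled = True

    def pop_ready(self, now_millis: float) -> List[str]:
        # Drain every entry that is due, skipping entries that were cancelled in the meantime.
        ready: List[str] = []
        while self._heap and self._heap[0].when_millis <= now_millis:
            query = heapq.heappop(self._heap)
            if query.cancelled:
                continue
            self._by_alias.pop(query.alias, None)
            ready.append(query.alias)
        return ready


if __name__ == "__main__":
    sched = TinyScheduler()
    sched.schedule("zoomer._hap._tcp.local.", when_millis=1_000.0)
    sched.schedule("disappear._hap._tcp.local.", when_millis=2_000.0)
    sched.cancel("zoomer._hap._tcp.local.")
    print(sched.pop_ready(1_500.0))  # [] - the only due entry was cancelled
    print(sched.pop_ready(2_500.0))  # ['disappear._hap._tcp.local.']

The real QueryScheduler in browser.py layers more on top of this basic pattern: TTL-aware rescue retries via schedule_rescue_query, batching of ready types into outgoing PTR questions, and an asyncio TimerHandle (_next_run) to wake up at the next due time.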