Merge pull request #5579 from Textualize/fuzzy-fix
Fix for fuzzy matcher
willmcgugan authored Feb 26, 2025
2 parents fd26e24 + a0597c9 commit 3955ece
Showing 4 changed files with 28 additions and 14 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

## [2.1.2] - 2025-02-26

### Fixed

- Fixed command palette fuzzy search bailing too early https://github.com/Textualize/textual/pull/5579

## [2.1.1] - 2025-02-22

### Fixed
@@ -2773,6 +2779,7 @@ https://textual.textualize.io/blog/2022/11/08/version-040/#version-040
- New handler system for messages that doesn't require inheritance
- Improved traceback handling

[2.1.2]: https://github.com/Textualize/textual/compare/v2.1.1...v2.1.2
[2.1.1]: https://github.com/Textualize/textual/compare/v2.1.0...v2.1.1
[2.1.0]: https://github.com/Textualize/textual/compare/v2.0.4...v2.1.0
[2.0.4]: https://github.com/Textualize/textual/compare/v2.0.3...v2.0.4
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "textual"
version = "2.1.1"
version = "2.1.2"
homepage = "https://github.com/Textualize/textual"
repository = "https://github.com/Textualize/textual"
documentation = "https://textual.textualize.io/"
1 change: 0 additions & 1 deletion src/textual/command.py
@@ -949,7 +949,6 @@ async def _search_for(
)
for provider in self._providers
]

# Set up a delay for showing that we're busy.
self._start_busy_countdown()

32 changes: 20 additions & 12 deletions src/textual/fuzzy.py
@@ -13,6 +13,7 @@

import rich.repr

from textual.cache import LRUCache
from textual.content import Content
from textual.visual import Style

@@ -43,8 +44,8 @@ def branch(self, offset: int) -> tuple[_Search, _Search]:
def groups(self) -> int:
"""Number of groups in offsets."""
groups = 1
last_offset = self.offsets[0]
for offset in self.offsets[1:]:
last_offset, *offsets = self.offsets
for offset in offsets:
if offset != last_offset + 1:
groups += 1
last_offset = offset
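
The `groups` property above counts how many runs of consecutive offsets a match spans. As a rough illustration (not part of the commit; the helper name is hypothetical), the same logic in isolation:

```python
def count_groups(offsets: tuple[int, ...]) -> int:
    """Count runs of consecutive offsets, mirroring _Search.groups above."""
    groups = 1
    last_offset, *rest = offsets
    for offset in rest:
        if offset != last_offset + 1:
            groups += 1
        last_offset = offset
    return groups


# (0, 1, 2, 7, 8) contains two consecutive runs: (0, 1, 2) and (7, 8).
assert count_groups((0, 1, 2, 7, 8)) == 2
```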
@@ -57,13 +58,17 @@ class FuzzySearch:
Unlike a regex solution, this will find all possible matches.
"""

cache: LRUCache[tuple[str, str, bool], tuple[float, tuple[int, ...]]] = LRUCache(
1024 * 4
)

def __init__(self, case_sensitive: bool = False) -> None:
"""Initialize fuzzy search.
Args:
case_sensitive: Is the match case sensitive?
"""
self.cache: dict[tuple[str, str, bool], tuple[float, tuple[int, ...]]] = {}

self.case_sensitive = case_sensitive

def match(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
@@ -76,7 +81,6 @@ def match(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
Returns:
A pair of (score, tuple of offsets). `(0, ())` for no result.
"""

query_regex = ".*?".join(f"({escape(character)})" for character in query)
if not search(
query_regex, candidate, flags=0 if self.case_sensitive else IGNORECASE
@@ -124,13 +128,13 @@ def score(search: _Search) -> float:
"""
# This is a heuristic, and can be tweaked for better results
# Boost first letter matches
score: float = sum(
(2.0 if offset in first_letters else 1.0) for offset in search.offsets
offset_count = len(search.offsets)
score: float = offset_count + len(
first_letters.intersection(search.offsets)
)
# Boost to favor fewer groups
offset_count = len(search.offsets)
normalized_groups = (offset_count - (search.groups - 1)) / offset_count
score *= 1 + (normalized_groups**2)
score *= 1 + (normalized_groups * normalized_groups)
return score
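
The reworked heuristic above scores a match as one point per matched character plus a bonus point for each match that falls on the first letter of a word, then applies a multiplier of up to 2x when the match spans few groups. A standalone sketch of that arithmetic (illustrative only; the helper name and inputs are hypothetical):

```python
def heuristic_score(
    offsets: tuple[int, ...], first_letters: set[int], groups: int
) -> float:
    """Sketch of the scoring heuristic from the hunk above."""
    offset_count = len(offsets)
    # One point per matched character, plus one for each first-letter hit.
    score = float(offset_count + len(first_letters.intersection(offsets)))
    # Fewer groups => normalized_groups closer to 1 => multiplier closer to 2.
    normalized_groups = (offset_count - (groups - 1)) / offset_count
    return score * (1 + normalized_groups * normalized_groups)


# A contiguous three-character match starting on a word boundary:
# (3 + 1) * (1 + 1.0) == 8.0
print(heuristic_score((0, 1, 2), first_letters={0, 4}, groups=1))
```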

stack: list[_Search] = [_Search()]
Expand All @@ -139,20 +143,24 @@ def score(search: _Search) -> float:
query_size = len(query)
find = candidate.find
# Limit the number of loops out of an abundance of caution.
# This would be hard to reach without contrived data.
remaining_loops = 200

# This should be hard to reach without contrived data.
remaining_loops = 10_000
while stack and (remaining_loops := remaining_loops - 1):
search = pop()
offset = find(query[search.query_offset], search.candidate_offset)
if offset != -1:
if not set(candidate[search.candidate_offset :]).issuperset(
query[search.query_offset :]
):
# Early out if there is no chance of a match
continue
advance_branch, branch = search.branch(offset)
if advance_branch.query_offset == query_size:
yield score(advance_branch), advance_branch.offsets
push(branch)
else:
push(advance_branch)
push(branch)
push(advance_branch)


@rich.repr.auto
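
The behavioural changes in the final hunk: the loop budget is raised from 200 to 10,000 iterations (addressing the "bailing too early" issue noted in the changelog), the push order of the two branches is adjusted, and a search state is now skipped when the remaining candidate characters cannot possibly cover the remaining query characters. A minimal sketch of that last early-out test (illustrative; the function name is hypothetical):

```python
def could_still_match(
    candidate: str, query: str, candidate_offset: int, query_offset: int
) -> bool:
    """Do the remaining candidate characters contain, at least as a set,
    every remaining query character?"""
    return set(candidate[candidate_offset:]).issuperset(query[query_offset:])


print(could_still_match("command palette", "cmd", 1, 1))  # True: "m" and "d" remain
print(could_still_match("command palette", "cmz", 1, 1))  # False: no "z" remains
```

As the earlier hunks show, `FuzzySearch.match(query, candidate)` still returns a `(score, offsets)` pair, with `(0, ())` for no match, and results are now memoized in a class-level `LRUCache` keyed by `(query, candidate, case_sensitive)`.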
