|
5 | 5 |
|
6 | 6 | import regex as re # regex string finding/replacing
|
7 | 7 |
|
| 8 | +from urllib.parse import unquote |
| 9 | +from pathlib import Path |
| 10 | + |
8 | 11 | from .. import md2html
|
9 | 12 |
|
10 |
| -from ..lib import CreateStaticFilesFolders, WriteFileLog, simpleHash, get_html_url_prefix, retain_reference, OpenIncludedFile |
| 13 | +from ..lib import CreateStaticFilesFolders, WriteFileLog, simpleHash, get_html_url_prefix, retain_reference, OpenIncludedFile, slugify |
11 | 14 |
|
12 | 15 | from ..compiler.Templating import PopulateTemplate
|
13 | 16 | from ..core.PicknickBasket import PicknickBasket
|
@@ -228,6 +231,16 @@ def convert_markdown_to_html(pb):
|
228 | 231 | if pb.gc("toggles/features/embedded_search/enabled", cached=True):
|
229 | 232 | esearch = EmbeddedSearch(json_data=pb.search.OutputJson())
|
230 | 233 |
|
| 234 | + # prepare lookup to translate slugified folder names to their original names |
| 235 | + folder_og_name_lut = {} |
| 236 | + for file in pb.index.files.keys(): |
| 237 | + file_path = pb.index.files[file].path["markdown"]["file_relative_path"] |
| 238 | + for el in file_path.as_posix().split("/")[:-1]: |
| 239 | + slug_el = slugify(el) |
| 240 | + if slug_el not in folder_og_name_lut: |
| 241 | + print(slug_el, el) |
| 242 | + folder_og_name_lut[slug_el] = el |
| 243 | + |
231 | 244 | print("\t> SECOND PASS HTML")
|
232 | 245 |
|
233 | 246 | for fo in pb.index.files.values():
|
@@ -273,48 +286,63 @@ def convert_markdown_to_html(pb):
|
273 | 286 | html = md2html.insert_tags_footer(pb, html, tags, fo.md.metadata)
|
274 | 287 |
|
275 | 288 | # add breadcrumbs
|
| 289 | + # ------------------------------------------------------------------------ |
276 | 290 | if pb.gc("toggles/features/breadcrumbs/enabled", cached=True):
|
277 |
| - if node["url"] == "/index.html": |
| 291 | + html_url_prefix = pb.gc("html_url_prefix", cached=True) |
| 292 | + |
| 293 | + if node["url"] == f"{html_url_prefix}/index.html": |
| 294 | + # Don't create breadcrumbs for the homepage |
278 | 295 | snippet = ""
|
| 296 | + |
279 | 297 | else:
|
280 |
| - html_url_prefix = pb.gc("html_url_prefix", cached=True) |
| 298 | + # loop through all/links/along/the_way.html |
| 299 | + |
| 300 | + # set first element to be home |
281 | 301 | parts = [f'<a href="{html_url_prefix}/" style="color: rgb(var(--normal-text-color));">Home</a>']
|
282 | 302 |
|
283 |
| - previous_url = "" |
284 | 303 | subpaths = node["url"].replace(".html", "").split("/")[1:]
|
285 |
| - match_subpaths = subpaths |
286 | 304 |
|
287 | 305 | if pb.gc("toggles/force_filename_to_lowercase", cached=True):
|
288 |
| - match_subpaths = [x.lower() for x in subpaths] |
| 306 | + subpaths = [x.lower() for x in subpaths] |
289 | 307 |
|
290 | 308 | if html_url_prefix:
|
291 |
| - subpaths = subpaths[1:] |
292 |
| - match_subpaths = match_subpaths[1:] |
| 309 | + # remove the parts that are part of the prefix |
| 310 | + prefix_amount = len(html_url_prefix.split("/")) - 1 |
| 311 | + subpaths = subpaths[prefix_amount:] |
293 | 312 |
|
294 |
| - for i, msubpath in enumerate(match_subpaths): |
295 |
| - if i == len(msubpath) - 1: |
296 |
| - if node["url"] != previous_url: |
297 |
| - parts.append(f'<a href="{node["url"]}" ___COLOR___ >{subpaths[i]}</a>') |
298 |
| - continue |
| 313 | + previous_url = "" |
| 314 | + for i, subpath in enumerate(subpaths): |
| 315 | + subpath = unquote(subpath) |
| 316 | + if subpath in pb.index.network_tree.node_lookup: |
| 317 | + lnode = pb.index.network_tree.node_lookup[subpath] |
| 318 | + elif subpath in pb.index.network_tree.node_lookup_slug: |
| 319 | + lnode = pb.index.network_tree.node_lookup_slug[subpath] |
299 | 320 | else:
|
300 |
| - url = None |
301 |
| - if msubpath in pb.index.network_tree.node_lookup: |
302 |
| - url = pb.index.network_tree.node_lookup[msubpath]["url"] |
303 |
| - elif msubpath in pb.index.network_tree.node_lookup_slug: |
304 |
| - url = pb.index.network_tree.node_lookup_slug[msubpath]["url"] |
305 |
| - else: |
306 |
| - parts.append(f'<span style="color: #666;">{subpaths[i]}</span>') |
307 |
| - previous_url = "" |
308 |
| - continue |
309 |
| - if url != previous_url: |
310 |
| - parts.append(f'<a href="{url}" ___COLOR___>{subpaths[i]}</a>') |
311 |
| - previous_url = url |
| 321 | + # try finding folder with same name in markdown folder |
| 322 | + # to get proper capitalization, even if we use slugify |
| 323 | + name = unquote(subpaths[i]) |
| 324 | + if name in folder_og_name_lut: |
| 325 | + name = folder_og_name_lut[name] |
| 326 | + |
| 327 | + parts.append(f'<span style="color: #666;">{name}</span>') |
| 328 | + previous_url = "" |
312 | 329 | continue
|
313 | 330 |
|
| 331 | + url = lnode["url"] |
| 332 | + name = lnode["name"] |
| 333 | + |
| 334 | + # in the case of folder notes, the folder and the note share the same name; |
| 335 | + # we don't want to print that name twice in the breadcrumbs |
| 336 | + if url != previous_url: |
| 337 | + parts.append(f'<a href="{url}" ___COLOR___>{name}</a>') |
| 338 | + previous_url = url |
| 339 | + |
| 340 | + # set all links to be normal text color except for the last link |
314 | 341 | parts[-1] = parts[-1].replace("___COLOR___", "")
|
315 | 342 | for i, link in enumerate(parts):
|
316 | 343 | parts[i] = link.replace("___COLOR___", 'style="color: var(--normal-text-color);"')
|
317 | 344 |
|
| 345 | + # combine parts into snippet |
318 | 346 | snippet = " / ".join(parts)
|
319 | 347 | snippet = f"""
|
320 | 348 | <div style="width:100%; text-align: right;display: block;margin: 0.5rem;">
|
|
0 commit comments