diff --git a/._itemcache/itemcache.json b/._itemcache/itemcache.json new file mode 100644 index 0000000..3af521d --- /dev/null +++ b/._itemcache/itemcache.json @@ -0,0 +1,1708 @@ +{ + "http://macintoshgarden.org/apps/after-dark-myst-screen-saver": { + "fetched_at": "2026-04-23T15:29:41.5558839+10:00", + "software_item": { + "Title": "After Dark: Myst Screen Saver", + "URL": "http://macintoshgarden.org/apps/after-dark-myst-screen-saver", + "Description": "After Dark Screen Saver modules of the game Myst. In a slide-show format. Covers all of the \"Ages\" in the game Myst. You can opt to install one or more or all of the modules. This was a give-away promotion. The source of this installer was from MacFormat magazine issue 40, August 1996 CD.\n\nMD5 checksum: 54e17afffb1f755f4dc19f39ef717483 *mystscreensaver.sit\n\nMyst_Screen_Saver-hybrid_CD.iso is a hybrid Mac/PC installer disc image sourced from the Internet Archive.\n\nMyst_screensaver_images.zip includes the 249 JPEG files that are installed as part of the screen saver.\n\nRuns on a 68020 CPU, Mac OS 7.0 or newer. Does not run on Mac OS 9. - I wouldn't attempt it even on 8.0 or newer without updating the After Dark Engine vers. 3.0 that this installs 1st. You are probably safe to use this as is with a Mac OS from vers. 7.0 to 7.6.1\n\nNot native PPC but will run on PPC OK given the right conditions.\n\nNote: If you opt for the full install or install the Selenitic Tour module individually, the installer claims the image \"Selene20.JPG may be damaged, use with caution\", It can be safely deleted (there are over 200 images in total, you won't miss one less).\n\n[Warning:] The After Dark Engine that gets installed is version 3.0b - I think the \"b\" stands for buggy. In the installation process it creates on Basilisk II (perhaps other Macs too), a corrupt Preferences file in the System Folder's Preferences Folder in its own \"After Dark Preferences\" folder. 
What happens is; after installing you need to reboot your Mac. After booting up, After Dark immediately starts and locks you out of your computer. It asks for a nonexistent password (not the provided installation password) and you have no option but to crash your Mac and start up with Extensions turned off.\n\nThe Workaround:\n[a] Replace the installed corrupt Preferences with the supplied working After Dark Preferences file from the above 2nd Download link on this page, named \"a-dark-3-prefs.sit\". Better: Preempt the corrupt Prefs file before you install the screen saver by extracting the After Dark Prefs folder from the \"a-dark-3-prefs.sit\" file and placing this folder inside of your System Folder's Preferences folder.\n[b] Alternatively; Install the After Dark 4.0 (Engine) from here at the Macintosh Garden. And only install the Myst Saver Modules by choosing only the modules that you want to Install. That is, don't choose a Full Install or elect to install the After Dark Engine 3 from the Myst Screen Saver Installer. 
This would give better compatibility with newer Mac OS's too.", + "Downloads": [ + { + "Title": "mystscreensaver.sit", + "Size": "24.44 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/mystscreensaver.sit?expires=1776922480\u0026token=ISNFjOjmLIMn_L_IT0nwNA\u0026st=17b538827ab4708499453429927efd7a" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/mystscreensaver.sit" + }, + { + "Text": "54e17afffb1f755f4dc19f39ef717483", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=mystscreensaver.sit" + } + ] + }, + { + "Title": "a-dark-3-prefs.sit", + "Size": "1.22 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/a-dark-3-prefs.sit?expires=1776922480\u0026token=cWRrVPNM51syzI3AanEYqg\u0026st=02efece000093e06f20ef6ca61f222bd" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/a-dark-3-prefs.sit" + }, + { + "Text": "3c3633a865eddb97102f9b58230e4c7a", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=a-dark-3-prefs.sit" + } + ] + }, + { + "Title": "Myst_Screen_Saver-hybrid_CD.iso_.sit", + "Size": "49.07 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Myst_Screen_Saver-hybrid_CD.iso_.sit?expires=1776922480\u0026token=ipRxePsCn8DcILxclNKTow\u0026st=2c7a71c4be571db21ac67f9b0ef0772b" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Myst_Screen_Saver-hybrid_CD.iso_.sit" + }, + { + "Text": "ed8267c8f9c2747489c2df577556fca7", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Myst_Screen_Saver-hybrid_CD.iso_.sit" + } + ] + }, + { + "Title": "Myst_screensaver_images.zip", + "Size": "21.77 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6", + "Links": [ + { + "Text": "www", + "URL": 
"http://download.macintoshgarden.org/apps/Myst_screensaver_images.zip?expires=1776922480\u0026token=e3WLlrubrXp167dV_sj2Xw\u0026st=8d4813d0d15e77034f94b9afa8df0751" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Myst_screensaver_images.zip" + }, + { + "Text": "2988714aa8daea162d836dac465beea4", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Myst_screensaver_images.zip" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-01.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-02.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-03.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-04.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-05.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-06.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-07.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-08.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-09.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/myst-scrnsvr-10.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Myst_SS_front.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Myst_SS_back.jpg" + ] + } + }, + "http://macintoshgarden.org/apps/clarisworks-2": { + "fetched_at": "2026-04-21T11:34:03.9289679+10:00", + "software_item": { + "Title": "ClarisWorks 2.0 \u0026 2.1", + "URL": "http://macintoshgarden.org/apps/clarisworks-2", + "Description": "See also: ClarisWorks 1, ClarisWorks 3, ClarisWorks 4, ClarisWorks 4 for Teachers, ClarisWorks 5, AppleWorks 5, AppleWorks 6, AppleWorks 6 J\n\nThe ClarisWorks 
2.0 install sets are 68k native, only.\n\nThe ClarisWorks 2.1 install sets are FAT; 68k \u0026 PPC native.\n\nDL #1 is useful if you want to install this version of ClarisWorks to a 68k Macintosh, including Mini vMac \u0026 Basilisk II. It can still be updated to version 2.1CDv4 using the updater in the 3rd DL above.\n\nWith DL #2 and DL #7 above, the installer program will detect the type of Macintosh CPU you are installing to and install the appropriate (68k or PPC native) ClarisWorks program onto your Macintosh. The installer will only require and ask for \"Disk 3\" if you are installing ClarisWorks to a PPC Macintosh.\n\nWhen running ClarisWorks 2.x for the first time, you will be asked for a user name and serial. Enter a name. The serial number field is optional, and is not required.", + "Downloads": [ + { + "Title": "clarisworks-2cdv1.sit", + "Size": "2.08 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/clarisworks-2cdv1.sit?expires=1776735543\u0026token=FZon-OWGTUpKF4D4NE_lPA\u0026st=e64f3f2d19f8e47863777bf5485fc0af" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/clarisworks-2cdv1.sit" + }, + { + "Text": "e39beee76632ee0756242ea34321cbd3", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=clarisworks-2cdv1.sit" + } + ] + }, + { + "Title": "cwks-21cdv3-mac.sit", + "Size": "2.92 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/cwks-21cdv3-mac.sit?expires=1776735543\u0026token=RYCJiUFmOsIssLxXPejiWg\u0026st=4a28f3cd7b32ac2c4519fbd89163ca5f" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/cwks-21cdv3-mac.sit" + }, + { + "Text": "cb3435ec222239cdd32fec466e1fc9bd", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=cwks-21cdv3-mac.sit" + } + ] + }, + { + 
"Title": "clarisworks-21cdv4-updtr.sit", + "Size": "604.27 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/clarisworks-21cdv4-updtr.sit?expires=1776735543\u0026token=ADdjfMsm9qSY_-DO0Uqacg\u0026st=1b2b9abcf1005ddfb6bad6e2b5c912a7" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/clarisworks-21cdv4-updtr.sit" + }, + { + "Text": "a96642f1f0bd40b35b2c45541589a44a", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=clarisworks-21cdv4-updtr.sit" + } + ] + }, + { + "Title": "ClarisWorks_20v1_NL.sit", + "Size": "2.36 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks_20v1_NL.sit?expires=1776735543\u0026token=Rhuipj1g3LuOsXW6Iv19Hw\u0026st=0bccc4c857e31fe3fb48786166aec116" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/ClarisWorks_20v1_NL.sit" + }, + { + "Text": "3931fe57f58461fe30077b6f1b96b79b", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks_20v1_NL.sit" + } + ] + }, + { + "Title": "ClarisWorks_21v4_Update_NL.sit", + "Size": "437.66 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks_21v4_Update_NL.sit?expires=1776735543\u0026token=_7LWcMh_HSOOYcFgAgt4SA\u0026st=408f5d474ea86bae7ad4e88787aa76d8" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/ClarisWorks_21v4_Update_NL.sit" + }, + { + "Text": "dc7f1da312a32fc8da0b00eb2f381a83", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks_21v4_Update_NL.sit" + } + ] + }, + { + "Title": "ClarisWorks_ITA.sit", + "Size": "2.18 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ 
+ { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks_ITA.sit?expires=1776735543\u0026token=N9TEw-ZfsgCuho33Z-x9bg\u0026st=a626ffbbfcffb3a7f6abce78f0cb4dda" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/ClarisWorks_ITA.sit" + }, + { + "Text": "b34f6d9060fc51e4b54105973241baad", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks_ITA.sit" + } + ] + }, + { + "Title": "ClarisWorks-2.1-Sweden.sit", + "Size": "3.69 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks-2.1-Sweden.sit?expires=1776735543\u0026token=-ah7q4pynclZVqIqtsj8lA\u0026st=d50d9d04b98780fa882da65b8b0f7f48" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/ClarisWorks-2.1-Sweden.sit" + }, + { + "Text": "0883afa59f059b0a6f220b85bbaee818", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks-2.1-Sweden.sit" + } + ] + }, + { + "Title": "ClarisWorks_2.0v1.sit", + "Size": "2.09 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks_2.0v1.sit?expires=1776735543\u0026token=t_tYPZivVSpOk2HOvZ6s0A\u0026st=75a00c4c732ae96b26380a3ee2baf87e" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/ClarisWorks_2.0v1.sit" + }, + { + "Text": "931f8f3827d4715c09e2f8d99803c23a", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks_2.0v1.sit" + } + ] + }, + { + "Title": "ClarisWorks20Bv1.hqx", + "Size": "3.00 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/ClarisWorks20Bv1.hqx?expires=1776735543\u0026token=Y0UELIDotXVGxoi7FVxvXQ\u0026st=2ed18f8add1dd37c2e21c6fa98867c79" + }, + { + "Text": "mirror", + 
"URL": "http://old.mac.gdn/apps/ClarisWorks20Bv1.hqx" + }, + { + "Text": "c4e1278102e327385351a78be9a643d9", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=ClarisWorks20Bv1.hqx" + } + ] + }, + { + "Title": "CW_2.0Bv1_Images.sit", + "Size": "2.48 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/CW_2.0Bv1_Images.sit?expires=1776735543\u0026token=Ahuqut_3-V_NrLKSRkAcZw\u0026st=a21c626c22aac0ee044089b9a30ca62c" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/CW_2.0Bv1_Images.sit" + }, + { + "Text": "48f48c6a70ff9fa68c6554a9bd76d3fd", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=CW_2.0Bv1_Images.sit" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/claris2.1.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/cwks-21cdv3-01.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/cwks-21cdv3-02.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/cwks-21-first.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_002_p063.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_002_p064.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_002_p065.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Mac_Format_15_p076.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Mac_Format_15_p077.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_06_p006.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/ClarisWorks2box.jpeg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/ClarisWorks2back.jpeg", + 
"http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/ClarisWorks-2.1-Swede-discs.jpg" + ] + } + }, + "http://macintoshgarden.org/apps/hypercard-21": { + "fetched_at": "2026-05-01T14:10:02.7661545+10:00", + "software_item": { + "Title": "HyperCard 2.1", + "URL": "http://macintoshgarden.org/apps/hypercard-21", + "Description": "Download #1: This is the full 800K floppy disk set in Disk Copy 4.2 image format of Apple's HyperCard 2.1 for Macintosh archived in StuffIt 3.5 format for backwards compatibility. - Not sourced from original media, these have been recovered and converted from poor copies of the original installation disk set.\n\nDownload #2: The Swedish version of Hypercard 2.1. It is unknown whether it is complete. Likely not, since it's coming only on a single 800k Disk Copy 4.2 image. Does include the Swedish application though.\n\nSee also: HyperCard 1.0.1, HyperCard Z1-1.2.2 (Australian), HyperCard DK-1.2.2 (Danish), HyperCard 1.2.5, HyperCard C1-2.0v2 (French), HyperCard 2.1 (Player), HyperCard 2.1b13 (Beta), HyperCard 2.2, HyperCard 2.3 + Addmotion II, HyperCard 2.4.1\n\nRequires the following minimum hardware and software:\n\ni. A Macintosh computer with 1 megabyte (MB) of memory or more. To run HyperCard under MultiFinder, your system should have at least 2 MB of memory.\nii. System software version 6.0.5 or later. No idea if this runs under \"Classic\" on Mac OS X but it may do.\n\n68k coded, runs fine on PPC up to 9.2.2\n\nNote: There are two additional patched stacks supplied in with this download which fix compatibility issues with Mac OS 8–9 in their equivalent, included original stacks. 
(see accompanying read me texts included in DL).", + "Downloads": [ + { + "Title": "hc-21-diskset.sit", + "Size": "2.58 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/hc-21-diskset.sit?expires=1777608902\u0026token=KetZJmBqMWC77waf1A4k5w\u0026st=157b1f2f237fbf4b71458ddecb7a7339" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/hc-21-diskset.sit" + }, + { + "Text": "bafb69d076ebe2be93229d9837439229", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=hc-21-diskset.sit" + } + ] + }, + { + "Title": "Swedish_HC_v2.1.sit", + "Size": "374.42 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Swedish_HC_v2.1.sit?expires=1777608902\u0026token=BN_NVzr8WmAg_Aj22ayQmg\u0026st=bffcca473faeec54677edb6c4c7e2688" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Swedish_HC_v2.1.sit" + }, + { + "Text": "d6d5d0b455bbe79116cfbf6f0b25619e", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Swedish_HC_v2.1.sit" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/hc21-full-00.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/hc21-full-01.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/hc21-full-02.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/hc21-full-03.gif", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_06_p068.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MacFormat_06_p069.jpg" + ] + } + }, + "http://macintoshgarden.org/apps/microsoft-word": { + "fetched_at": "2026-04-21T12:15:58.0666396+10:00", + "software_item": { + "Title": "Microsoft Word (3–5)", + "URL": 
"http://macintoshgarden.org/apps/microsoft-word", + "Description": "Many still claim that version 5.1a is the \"Best. Word. Ever.\" People swore by it so much that the later Word 98 offered the choice of 5.1-style menus.\n\nDL #1 includes Microsoft Word 3.00\n\nDL #2 includes Microsoft Word 3.0.1, Microsoft Word 4.0, Microsoft Word 5.0, Microsoft Word 5.1a\n\nDL #3 contains PersonalizeWord 1.0, which customizes the user name and organization strings in Word 4.0–6.0\n\nDL #4 contains a colour replacement of the B\u0026W toolbar for Word 5.1\n\nDL #5 is Word 5.1a repackaged to .dsk images for easy use with vMac emulators\n\nDL #6 is a pre-installed German Word 4.0 salvaged from an office LC\n\nDL #7 pre-installed Swedish Word 5.0 och 5.1\n\nDL #8 Swedish install floppies for Word 5.0, previously personalized. Stuffed with StuffIt Deluxe 4.0. Swedish install media for v5.1 can be downloaded here.\n\nDL #9 Word 4.00D original disk images in Disk Copy 4.2 format. Unfortunately missing the \"Utilities 1\" disk so it is not complete, but the program is a later version than the other Word 4.0 installations uploaded here.", + "Downloads": [ + { + "Title": "Word3.sit_.hqx", + "Size": "1.01 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word3.sit_.hqx?expires=1776738057\u0026token=riOmqYH7IES7jYTmG8hOBw\u0026st=411753aadce2e081523e1e368933c616" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word3.sit_.hqx" + }, + { + "Text": "b42864a44114c68f9a543165e99b5a01", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word3.sit_.hqx" + } + ] + }, + { + "Title": "Microsoft_Word.sit", + "Size": "8.71 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": 
"http://download.macintoshgarden.org/apps/Microsoft_Word.sit?expires=1776738057\u0026token=hTkdSzU8id6WoXNI2k8Whg\u0026st=21f89334f792c8a743c787d422f3f735" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Microsoft_Word.sit" + }, + { + "Text": "fd4592671de8bdebfe97ad545fabe31d", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Microsoft_Word.sit" + } + ] + }, + { + "Title": "personalizeword1.0.sit", + "Size": "10.66 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/personalizeword1.0.sit?expires=1776738057\u0026token=QebJ_UPovk8EuKD2y1gSdg\u0026st=45ab1a288e0a6393bfe2cd7b14376bcd" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/personalizeword1.0.sit" + }, + { + "Text": "83a80f37ae742e28a244aa3b1d065ebd", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=personalizeword1.0.sit" + } + ] + }, + { + "Title": "Word51ColorT.sit", + "Size": "63.19 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word51ColorT.sit?expires=1776738057\u0026token=FwvaxnRjkv_jvhTlGeEhpA\u0026st=9fe72747b40956a51d08f100e377fd72" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word51ColorT.sit" + }, + { + "Text": "bd6ca83bb5dba99c03c1ca1e98b64185", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word51ColorT.sit" + } + ] + }, + { + "Title": "Word_5.1_dsk_Images.zip", + "Size": "4.15 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.1_dsk_Images.zip?expires=1776738057\u0026token=1zozPUXDwMiYFRq-8mmdrA\u0026st=ecc6b1738c87d960588467698dc97e8d" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.1_dsk_Images.zip" + }, 
+ { + "Text": "8998b7d5f4bbf4919919959a6dfdd735", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.1_dsk_Images.zip" + } + ] + }, + { + "Title": "Word4.0.sit", + "Size": "928.26 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word4.0.sit?expires=1776738057\u0026token=1Z7PEPeH3BeB_9JKoDEdBw\u0026st=df098492ddfde9c43bd59ecd60e194b3" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word4.0.sit" + }, + { + "Text": "7c36416e9a0cb9a7cb9eb74ef5fab4a8", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word4.0.sit" + } + ] + }, + { + "Title": "Word_5.x_Sv.sit", + "Size": "7.42 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.x_Sv.sit?expires=1776738057\u0026token=PyuR7wfbuSv4KnAp6sanCg\u0026st=2b7c87e89f118abbd76e7552789baca3" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.x_Sv.sit" + }, + { + "Text": "7aab806f4ab97a86afa609ee425c6cf5", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.x_Sv.sit" + } + ] + }, + { + "Title": "Microsoft_Word_5.0_sv.sit", + "Size": "4.04 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Microsoft_Word_5.0_sv.sit?expires=1776738057\u0026token=0bubTuwu7f9XPAaGlydzRA\u0026st=386ff9431247b3ad5c1f9dd01abb12b9" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Microsoft_Word_5.0_sv.sit" + }, + { + "Text": "4ca08921d968ef17a7b10a8fd9a7c379", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Microsoft_Word_5.0_sv.sit" + } + ] + }, + { + "Title": "Word_4.00D_incomplete.sit_.hqx", + "Size": "1,013.94 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 
6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_4.00D_incomplete.sit_.hqx?expires=1776738057\u0026token=WCkNbqqOog2xwEHnbZinvg\u0026st=f3aff0d1aa138cb86506c49a1fd590f3" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_4.00D_incomplete.sit_.hqx" + }, + { + "Text": "f87c50c0c57a49c40e7acec7e81052fc", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_4.00D_incomplete.sit_.hqx" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Word-5.1a-Color-Toolbar.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/ms-word-4-01.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/word-5.1a.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MicrosftWord50_front.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MicrosftWord50_back.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/wordflop.jpg" + ] + } + }, + "http://macintoshgarden.org/apps/microsoft-word-105": { + "fetched_at": "2026-04-21T12:51:39.9886417+10:00", + "software_item": { + "Title": "Microsoft Word 1.00 - 1.05", + "URL": "http://macintoshgarden.org/apps/microsoft-word-105", + "Description": "After much searching I managed to find the elusive Microsoft Word 1.05. The file included here is a .dsk image of the original disk, which also includes the invisible and protected file that is used for verifying you have an original disk. If you copy this version of Word to another disk and launch it from there you will be prompted to insert this floppy image so it can check for the file. 
However, you can boot from this disk and launch Word without any problems using Mini vMac.\n\nIn addition, Copy II HD will successfully move the files to a hard drive in MinivMac (tested using version 7.0 ).\n\nThe working disk image of Microsoft Word 1.00 I have uploaded here comes from my old backups that I made on CDs long time ago.\nIt boots under Mini vMac 36.04, loads System 1.1/Finder 1.1g and I think it’s a full image (see the pictures I uploaded).\nIt works well under System 6.0.8 too, but not with next versions of Mac OS (it freezes the system).\n\nI archived the image .zip as with DropStuff and Stuffit Expander 4.5, that I tested, the image becomes corrupted.", + "Downloads": [ + { + "Title": "word_105.zip", + "Size": "430.84 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/word_105.zip?expires=1776740199\u0026token=OXePN7A0RinWALp6kseT6g\u0026st=7e370e9b372afed518b9af9d1cbba9ec" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/word_105.zip" + }, + { + "Text": "d4d4c9a236370432c71e72915798e1f9", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=word_105.zip" + } + ] + }, + { + "Title": "Microsoft_Word_1.00.image_.zip", + "Size": "228.61 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Microsoft_Word_1.00.image_.zip?expires=1776740199\u0026token=a2Grv40mCXH6eTsB3IxR6w\u0026st=e80760eb92e77f64762ead9c03a34c67" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Microsoft_Word_1.00.image_.zip" + }, + { + "Text": "0531ba7c50c35edcf7dec4a03f37ea58", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Microsoft_Word_1.00.image_.zip" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/word_0.jpg", + 
"http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen_shot_2013-12-27_at_10.22.21_pm.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen_shot_2013-12-27_at_10.26.20_pm.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen_shot_2013-12-27_at_10.26.40_pm.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen_shot_2013-12-27_at_10.27.42_pm.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen_shot_2013-12-27_at_10.28.18_pm.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Word1.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Word2.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Word3.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/word1box.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/word1floppy.jpg" + ] + } + }, + "http://macintoshgarden.org/apps/microsoft-word-51-0": { + "fetched_at": "2026-04-21T11:30:25.105751+10:00", + "software_item": { + "Title": "Microsoft Word 5.1", + "URL": "http://macintoshgarden.org/apps/microsoft-word-51-0", + "Description": "Word processing software. Well regarded to the point that users continued to use it long after the 68k transition. 
To personalize the program with your name, copy disk image Install to your hard disk and replace the file Word Installer with the file from Word_Installer.sit.\n\nSee Also: Office 3 (US English), Word 5.1 (Danish), Word 5.1a (Dutch), Word 5.1 (Canadian French), Word 3-5 (US English), Word 5.1a (Italian)\n\nIf running on a Macintosh Plus or earlier, the 'Clear' key can be use as a substitute for NumLock.", + "Downloads": [ + { + "Title": "Word_5.1_EN-US.sit", + "Size": "4.03 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.1_EN-US.sit?expires=1776735324\u0026token=Ch_MX8VMXmmqixWA13vt9Q\u0026st=5d14f78b2d44a48edd8ff965d319a8e3" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.1_EN-US.sit" + }, + { + "Text": "15dad616752bd47ab6a582ef0b1f6922", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.1_EN-US.sit" + } + ] + }, + { + "Title": "Word_Installer.sit", + "Size": "74.72 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_Installer.sit?expires=1776735324\u0026token=-_UF-D540IF3eXDVIOagpw\u0026st=58eae54291457cacbe34d0762aa8c7d3" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_Installer.sit" + }, + { + "Text": "7e9e5d4c6e05c5f13b99a8610156dda8", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_Installer.sit" + } + ] + }, + { + "Title": "Install-w-img.sit", + "Size": "581.14 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Install-w-img.sit?expires=1776735324\u0026token=PzXu1y1Zv2jEHbQEdPZPfQ\u0026st=250576285558aca242cea71215ea7822" + }, + { + "Text": "mirror", + "URL": 
"http://old.mac.gdn/apps/Install-w-img.sit" + }, + { + "Text": "060192b61064409f427b87372d9957d5", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Install-w-img.sit" + } + ] + }, + { + "Title": "Word_5.1a_Patch.image_.hqx", + "Size": "25.67 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.1a_Patch.image_.hqx?expires=1776735324\u0026token=b6SR5y30feYik2THUv1o-g\u0026st=322df59db7d30057a5b19c76eee75083" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.1a_Patch.image_.hqx" + }, + { + "Text": "f84c1e41c018ab5b49936d732641c102", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.1a_Patch.image_.hqx" + } + ] + }, + { + "Title": "Word5.1a.sit", + "Size": "4.08 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word5.1a.sit?expires=1776735324\u0026token=ahZKrVeaMM9HZR52G2ywhA\u0026st=c7555296ad4bbac371e84b44f2e4b044" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word5.1a.sit" + }, + { + "Text": "563c5926c123c21f12392bce9f637807", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word5.1a.sit" + } + ] + }, + { + "Title": "Ms-Word-51a-AUS.sit", + "Size": "4.89 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Ms-Word-51a-AUS.sit?expires=1776735324\u0026token=8vhPfnpORpbROKUyYhfRKA\u0026st=a8d094666be0dc34e78b287accb15e08" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Ms-Word-51a-AUS.sit" + }, + { + "Text": "33dddca797c9fb0a03863659f319d3a7", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Ms-Word-51a-AUS.sit" + } + ] + }, + { + "Title": "D-Word_5.1.sit", + "Size": "3.50 MB", + "OS": 
"For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/D-Word_5.1.sit?expires=1776735324\u0026token=ITcGOH8JfGbn1P7-QePxOA\u0026st=8dc616e055fdac0afdba6fad6155cc4a" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/D-Word_5.1.sit" + }, + { + "Text": "3a9cbe353cce460a9de8b8d7973c9a17", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=D-Word_5.1.sit" + } + ] + }, + { + "Title": "MS-Word-51a-iso.zip", + "Size": "7.96 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/MS-Word-51a-iso.zip?expires=1776735324\u0026token=EyweNnyk4zKIsigNQf6Rmg\u0026st=19f1621b5a67e7fc2ba2e0ee9116382d" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/MS-Word-51a-iso.zip" + }, + { + "Text": "c023ad5ef44da1db702ab62ac37cc82f", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=MS-Word-51a-iso.zip" + } + ] + }, + { + "Title": "Word-6-Converter-for-MW5x.sit", + "Size": "134.57 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word-6-Converter-for-MW5x.sit?expires=1776735324\u0026token=s2m6maUaG4Em5YxFY0T3pg\u0026st=8e0301c147b6e7ab7fe5c3c4364fc171" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word-6-Converter-for-MW5x.sit" + }, + { + "Text": "5575e7768f83f059d6b46421e2636f48", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word-6-Converter-for-MW5x.sit" + } + ] + }, + { + "Title": "Word-97-8-Convrtr-for-W5-6.sit", + "Size": "586.22 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": 
"http://download.macintoshgarden.org/apps/Word-97-8-Convrtr-for-W5-6.sit?expires=1776735324\u0026token=S0UKJwDIxZ0UPpQldzhchA\u0026st=36af2ee2fc20fba2a33e1a1191d716ef" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word-97-8-Convrtr-for-W5-6.sit" + }, + { + "Text": "0844b08bdec28fecc97ca96712719962", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word-97-8-Convrtr-for-W5-6.sit" + } + ] + }, + { + "Title": "Word5.1-Finnish.sit", + "Size": "4.96 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word5.1-Finnish.sit?expires=1776735324\u0026token=vLNmndp_wxHvozvYH0cQ3A\u0026st=147b0bb9779c674dc09f990755589dc7" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word5.1-Finnish.sit" + }, + { + "Text": "9a8fc5b25bc50cc13d8d1698bd63915b", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word5.1-Finnish.sit" + } + ] + }, + { + "Title": "Microsoft_Word_5.1_sv.sit", + "Size": "4.44 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Microsoft_Word_5.1_sv.sit?expires=1776735324\u0026token=V8DsjLlICWpgokPNjS5cCg\u0026st=eddea2c00fdb2e883821661fc0aad3fc" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Microsoft_Word_5.1_sv.sit" + }, + { + "Text": "ba5fe650b37425fb973fdbaede6fea18", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Microsoft_Word_5.1_sv.sit" + } + ] + }, + { + "Title": "Word_5.1_en-GB_Proofing_Tools.sit_.bin", + "Size": "1.02 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.1_en-GB_Proofing_Tools.sit_.bin?expires=1776735324\u0026token=yLMz31gb3fNZufuFz_4TTw\u0026st=ccd3c08c5b3c33a95375de785a1db655" + }, 
+ { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.1_en-GB_Proofing_Tools.sit_.bin" + }, + { + "Text": "6b8ec4ec858a8ebe3530a1f55ebdc1ae", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.1_en-GB_Proofing_Tools.sit_.bin" + } + ] + }, + { + "Title": "Word_5.1_fr-FR_Proofing_Tools.sit_.bin", + "Size": "1,008.13 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Word_5.1_fr-FR_Proofing_Tools.sit__2.bin?expires=1776735324\u0026token=JajGX2FChpC3QZQcPh4uJQ\u0026st=706b90ea3abbc17dbb2e4c8f307f045a" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Word_5.1_fr-FR_Proofing_Tools.sit__2.bin" + }, + { + "Text": "7fc061909a95c81fa562a501e25cb518", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Word_5.1_fr-FR_Proofing_Tools.sit__2.bin" + } + ] + }, + { + "Title": "BasedWord51a.sit", + "Size": "3.26 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/BasedWord51a.sit?expires=1776735324\u0026token=9nhI5LRTVjrK80Cz5-3SqQ\u0026st=562eb269da570a572f2454199ac3c780" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/BasedWord51a.sit" + }, + { + "Text": "c48e93dc61b2e5791ba73b7ce504efd2", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BasedWord51a.sit" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/What_is_new_in_Microsoft_Word_5.1.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/7C34151B-D776-404D-9414-DBBA2692DD84.jpeg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Word-6-Converter-for-Word-5x-00.png" + ] + } + }, + "http://macintoshgarden.org/apps/star-trek-episode-guides": { + "fetched_at": "2026-04-21T13:11:32.1425914+10:00", + 
"software_item": { + "Title": "The Star Trek Guides", + "URL": "http://macintoshgarden.org/apps/star-trek-episode-guides", + "Description": "HyperCard stacks by David Landis with lots of information about episodes, movies, and characters of The Original Series (including the animated continuation), The Next Generation, Deep Space Nine, Voyager, and Enterprise.\n\nDownload #1: TOS/TNG/DS9/VOY/ENT v4.0, covering all episodes of the franchise through ENT Season 2, with pre-air information on early S3. (Going by Wayback Machine archives of the author's site, these are the final versions of the stacks.)\n\nDownload #2: TNG v2.1 and TOS/DS9/VOY v3.0, covering all episodes up to DS9 mid-S4 and VOY mid-S2.\n\nDownload #3: TNG v1.0, covering TNG episodes up to mid-S5.\n\nIf the stacks for all of the series are placed in the same folder, you can navigate seamlessly between them via assorted hyperlinks in the text.\n\nNeeds HyperCard or HyperCard Player to be executed.\n\nSuffers from slow dissolve transitions and crashes in Basilisk II.", + "Downloads": [ + { + "Title": "Star_Trek_Guides_4.0.sit", + "Size": "3.52 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Star_Trek_Guides_4.0.sit?expires=1776741391\u0026token=Tn9ERuzjhnWn0Ej7tljfdw\u0026st=82a3f35f11b022a814b85ac900c8fb98" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Star_Trek_Guides_4.0.sit" + }, + { + "Text": "149f6741d32db6305ef1729d8a7125d3", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Star_Trek_Guides_4.0.sit" + } + ] + }, + { + "Title": "trek_guides.img_.sit", + "Size": "3.06 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": 
"http://download.macintoshgarden.org/apps/trek_guides.img_.sit?expires=1776741391\u0026token=m9lWYvuyQkeTTiV3qCxPbg\u0026st=4d8e44a314b18dab20b9931a6b90f16c" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/trek_guides.img_.sit" + }, + { + "Text": "7ebca8d46415f8006f061bafd341fa77", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=trek_guides.img_.sit" + } + ] + }, + { + "Title": "Star_Trek-TNG.img_.sit", + "Size": "294.38 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/Star_Trek-TNG.img_.sit?expires=1776741391\u0026token=4P4DeGUMs7_LL_B-GweGMg\u0026st=6a96b13d8416ace9f6f64e9b5a4e7e5a" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/Star_Trek-TNG.img_.sit" + }, + { + "Text": "ad70ab43fed9017dd438a045a91dab6f", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Star_Trek-TNG.img_.sit" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen-tng.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/screen-ds9.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/1_Star_Trek_-_TOS_Guide_4.0.PNG", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/2_Star_Trek_-_TNG_Guide_4.0.PNG", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/3_Star_Trek_-_DS9_Guide_4.0.PNG", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/4_Star_Trek_-_VOY_Guide_4.0.PNG", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/5_Star_Trek_-_ENT_Guide_4.0.2.PNG" + ] + } + }, + "http://macintoshgarden.org/apps/stuffit-expander-351": { + "fetched_at": "2026-04-21T12:54:23.1352158+10:00", + "software_item": { + "Title": "StuffIt Expander 3.5.1", + "URL": "http://macintoshgarden.org/apps/stuffit-expander-351", + "Description": 
"StuffIt Expander v3.5.1. Predates the standard v4.0.2 typically used with Mini vMac. Packaged as a zipped disk image.\n\nSee Also\nStuffIt Expander 5.5\nStuffIt Expander 4.5\nStuffIt Expander 4.0.2\nStuffIt Deluxe", + "Downloads": [ + { + "Title": "stuffit_3.5.1.zip", + "Size": "783.42 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSystem 1 - 5\t\t\t\t\t\t\t\t\t\t\t\t - System 7.0 - 7.6", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/apps/stuffit_3.5.1.zip?expires=1776740362\u0026token=YzH7L_mxbk-M5WF1qWRqBA\u0026st=3605b45fd9389a11b4c9caa5a7d013a7" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/apps/stuffit_3.5.1.zip" + }, + { + "Text": "afdc69dd91933db1717cffb3c73660b4", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=stuffit_3.5.1.zip" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/StuffIt351-1_0.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/StuffIt351-2_0.png" + ] + } + }, + "http://macintoshgarden.org/games/boom": { + "fetched_at": "2026-04-21T14:01:32.1058855+10:00", + "software_item": { + "Title": "BOOM", + "URL": "http://macintoshgarden.org/games/boom", + "Description": "Described by the creators as \"Bomberman meets Doom\", Boom vigorously thrusts 80 levels directly into your cranium. 
You and a friend/relative/hostage can traverse 8 different areas populated with creatively devious enemies to highly memorable backing music in this classic arcade thriller.\n\nMac OS 7.0.1 — Mac OS X (Sierra), download the right version for you!", + "Downloads": [ + { + "Title": "BOOM_1.0.sit", + "Size": "1.89 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOM_1.0.sit?expires=1776744391\u0026token=BulDPm7RQmECvCwL7rteQw\u0026st=178d1214b6a88835b8b002e037022c53" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOM_1.0.sit" + }, + { + "Text": "4be430e950a21cfc207d8d684f31165a", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOM_1.0.sit" + } + ] + }, + { + "Title": "BOOM-114r2.bin", + "Size": "2.15 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOM-114r2.bin?expires=1776744391\u0026token=0gRdQi9VyjbC_7iI9Ut-2w\u0026st=132083b52e5be002b69abe7e9cef6ebf" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOM-114r2.bin" + }, + { + "Text": "f7f54ee6b74a3d7d344d86369363ec4b", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOM-114r2.bin" + } + ] + }, + { + "Title": "BOOM2.zip", + "Size": "19.96 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOM2.zip?expires=1776744391\u0026token=M59_qMmVPOU2swGFtiOt3g\u0026st=b1fcbc1bde36c8073fcf3138d70eb83e" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOM2.zip" + }, + { + "Text": "5920fdebe104a8622552b900b462c146", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOM2.zip" + } + ] + }, + { + "Title": "boom-123.sit", + "Size": "2.23 MB", + "OS": 
"For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-123.sit?expires=1776744391\u0026token=Od0DGZ2mQ4B2QY9eYFMaPA\u0026st=0b99220a6cc7fd0f5cb737897e514308" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-123.sit" + }, + { + "Text": "524e662e0dccf43456e727ec62e497a9", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-123.sit" + } + ] + }, + { + "Title": "BOOM_1.4.5.zip", + "Size": "2.57 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOM_1.4.5.zip?expires=1776744391\u0026token=lc4_Kng15SdiqymLP-0X1Q\u0026st=c1dcea1a5d7a716899ea2f6b63476f3e" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOM_1.4.5.zip" + }, + { + "Text": "69b0d79cdd7e8dad55174c2dbb68203e", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOM_1.4.5.zip" + } + ] + }, + { + "Title": "BOOM_1.4.5.sitx", + "Size": "2.28 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOM_1.4.5_0.sitx?expires=1776744391\u0026token=IS36hiAaADRYYedUFIRUFg\u0026st=2c27933d21934331d9c7835604ba6096" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOM_1.4.5_0.sitx" + }, + { + "Text": "56215de83c3fdd53f28ea65c0bb43bb5", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOM_1.4.5_0.sitx" + } + ] + }, + { + "Title": "boom-151.hqx", + "Size": "2.86 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-151.hqx?expires=1776744391\u0026token=GVJP2KUiqxMlzaJBXlvFeA\u0026st=ef2c41643b6395200f79d4ae86b88860" + }, + { + "Text": "mirror", + "URL": 
"http://old.mac.gdn/games/boom-151.hqx" + }, + { + "Text": "845099dd02ea4028bed79ce35c493683", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-151.hqx" + } + ] + }, + { + "Title": "boom-154.sit", + "Size": "2.43 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-154.sit?expires=1776744391\u0026token=gmjZy-d5Kx6wTV9AFZAo2w\u0026st=16ac7b9a0067fe014d2eb3a936583b4d" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-154.sit" + }, + { + "Text": "f6ef6f73a17e4b6399e8d45c1608969c", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-154.sit" + } + ] + }, + { + "Title": "boom-155.sit", + "Size": "2.43 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-155.sit?expires=1776744391\u0026token=vUflKcI6ry5em3t7DyVdcQ\u0026st=26d98ec8b05299bbfbd6163dc57714c9" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-155.sit" + }, + { + "Text": "19c7e196f2d475d33c006daa01f60d4f", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-155.sit" + } + ] + }, + { + "Title": "boom-157.sit", + "Size": "2.61 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-157.sit?expires=1776744391\u0026token=f2b3oLU66aydXCkgsJ6_xQ\u0026st=f3198a03eb94d7f17a083279308322d2" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-157.sit" + }, + { + "Text": "833d2484e3487b452e6efead48648f90", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-157.sit" + } + ] + }, + { + "Title": "boom-162.dmg_.zip", + "Size": "3.16 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + 
{ + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-162.dmg_.zip?expires=1776744391\u0026token=hnckPri4nfMREplSSpwLeA\u0026st=b4f2f9d962f4aed22dd39a9b941d5cfb" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-162.dmg_.zip" + }, + { + "Text": "207989d2eef47b4a1e4003c4e518fd00", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-162.dmg_.zip" + } + ] + }, + { + "Title": "boom-163.sit", + "Size": "2.93 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-163.sit?expires=1776744391\u0026token=E_oJlbx7uk8-qLcbQcXbMQ\u0026st=860db2dc3f7ad72316844269e6b09229" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-163.sit" + }, + { + "Text": "0ce6b14956306396018580bc24b13462", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-163.sit" + } + ] + }, + { + "Title": "BOOMEdit-32.dmg_.tgz", + "Size": "304.07 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/BOOMEdit-32.dmg_.tgz?expires=1776744391\u0026token=4rzSJjjjIQXEZH-NF8EuvQ\u0026st=5022a0fbe4c75c9a4b034dea674ec1c0" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/BOOMEdit-32.dmg_.tgz" + }, + { + "Text": "c5eacec146953c88586dea458917974f", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=BOOMEdit-32.dmg_.tgz" + } + ] + }, + { + "Title": "boom-206.dmg", + "Size": "19.11 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS X", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/boom-206.dmg?expires=1776744391\u0026token=ltkk1mxq27c6dAWXQ0HnkA\u0026st=14ba5dda1b293a7bceda0126adfe5408" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/boom-206.dmg" + }, + { + "Text": 
"a2b556569076123ecbebf10ae6658a0a", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=boom-206.dmg" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/boom_1.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/boom_2.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/boom_3.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Factor_Software_Logo.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Loading_Screen.png" + ] + } + }, + "http://macintoshgarden.org/games/mac-bestsellers-3-games": { + "fetched_at": "2026-04-21T11:06:07.844497+10:00", + "software_item": { + "Title": "Mac BestSellers 3 Games", + "URL": "http://macintoshgarden.org/games/mac-bestsellers-3-games", + "Description": "Toast Image of a Install Disc for 3 Games, Super Tetris 1.0, Spectre 1.0, and HellCats over the Pacific 1.0.4", + "Downloads": [ + { + "Title": "MAC_BEST_SELLERS.toast", + "Size": "645.88 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 7.0 - 7.6\t\t\t\t\t\t\t\t\t\t - Mac OS 8.5 - 8.6", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/MAC_BEST_SELLERS.toast?expires=1776733867\u0026token=TzcgjL_jQJJ97URmhEJScw\u0026st=6a1d76ea7fe75933b03369bb63a45e7a" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/MAC_BEST_SELLERS.toast" + }, + { + "Text": "2fe7fe5332ffd0292195e31078cbd348", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=MAC_BEST_SELLERS.toast" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/MAC_BEST_SELLERS.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Tetris1.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/Tetris2.png" + ] + } + }, + "http://macintoshgarden.org/games/spectre": { + "fetched_at": 
"2026-04-21T11:26:49.2907901+10:00", + "software_item": { + "Title": "Spectre", + "URL": "http://macintoshgarden.org/games/spectre", + "Description": "Capture the flags and destroy the other tanks in a polygonal 3D world. Similar to the Battlezone vector arcade game in style and gameplay, but with new elements from deathmatch/CTF-style games. This first Spectre game was exclusively for Macintosh, though some later iterations for other machines were also simply called \"Spectre.\"\n\n1st download contains DiskDup disk images of an early Beta version, as well as Version 1.0, Spectre Challenger \u0026 v1.0.2 (without \"Challenger\" branding), a World Editor, Cheats, Full Documentation, Product Photos, and Serials — all enclosed in a MacZip \".zip\" archive.\n2nd download Three different Spectre 1.0 applications in a Mar archive which you can unpack on just about every vintage Mac.\n3rd download Spectre 1.1d16 in a StuffIt archive.\n4th download is the World Editor by John Lindal, that lets you edit the world grid and colors.\n4th download is a demo of v1.0b2.\n5th download is a demo of v1.0.\n6th download is the Custom Spectre Editor which gives you speed up to 18, ammunition up to 127, and invincible shields!\n\nRead more at Wikipedia.\n\nSee the sequels: Spectre Supreme and Spectre VR.\n\nSpectre3D.com, homepage of the discontinued Spectre remake for iOS, now hosts a browser version of it in playable alpha state.\n\nMac Plus or later, System 6.0.3 or later, b/w or 16 or 256 colors.\n\nIf extracting the DL #1 zip archive on a classic Mac OS. 
- For best results use MacZip to correctly restore the extracted Resource fork data and Data \u0026 File Type information of the contents.", + "Downloads": [ + { + "Title": "spectre.zip", + "Size": "9.09 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/spectre.zip?expires=1776735108\u0026token=7zXm1D8LAOnK-kBC3ouEpg\u0026st=cfe1fa841b74a73bdc233e830d718e0c" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/spectre.zip" + }, + { + "Text": "022f313a8e9867deedf9dd3407c63f9f", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=spectre.zip" + } + ] + }, + { + "Title": "Spectre_1.0.mar", + "Size": "3.34 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/Spectre_1.0.mar?expires=1776735108\u0026token=j17mrTmEAZnmW4kZkQo-DQ\u0026st=979b840abcea2e59271c48cabc1a2097" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/Spectre_1.0.mar" + }, + { + "Text": "09b60f053fc383936dad4d82bc1a0479", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Spectre_1.0.mar" + } + ] + }, + { + "Title": "Spectre_1.1.sit", + "Size": "765.80 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/Spectre_1.1.sit?expires=1776735108\u0026token=aAFsLB1c2Oz7d5XnVqGlYg\u0026st=4a0cb041b603bd0d14a6a52c53923197" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/Spectre_1.1.sit" + }, + { + "Text": "8f8f95decf1e7caffa638798a302e1e9", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Spectre_1.1.sit" + } + ] + }, + { + "Title": "Spectre_World_Editor_2.1.sit", + "Size": "74.55 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": 
"www", + "URL": "http://download.macintoshgarden.org/games/Spectre_World_Editor_2.1.sit?expires=1776735108\u0026token=sRGt8xsbCTtcY8RWhhXWSg\u0026st=cf9e001a8da24fd8a101d70467174acd" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/Spectre_World_Editor_2.1.sit" + }, + { + "Text": "6b5639eca9e21ecde3e80f8a2040d61c", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Spectre_World_Editor_2.1.sit" + } + ] + }, + { + "Title": "SpectreDemo.sit", + "Size": "253.32 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/SpectreDemo.sit?expires=1776735108\u0026token=wGWiUKLDuNPyk0R0tHejdA\u0026st=ad7824728d6fc0c514705f147f6c423d" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/SpectreDemo.sit" + }, + { + "Text": "5664ead985c70b064f3b83c12dfc5d47", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=SpectreDemo.sit" + } + ] + }, + { + "Title": "Spectre10_demo.sit", + "Size": "242.30 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/Spectre10_demo.sit?expires=1776735108\u0026token=ndRuhgETZj3EfT4NypTosQ\u0026st=44e5481518f3f2573f69e87fa222756b" + }, + { + "Text": "mirror", + "URL": "http://old.mac.gdn/games/Spectre10_demo.sit" + }, + { + "Text": "683427a1569c3270b5868965af991353", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Spectre10_demo.sit" + } + ] + }, + { + "Title": "Custom_Spectre_Editor_v1.0.sit", + "Size": "21.72 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "http://download.macintoshgarden.org/games/Custom_Spectre_Editor_v1.0_0.sit?expires=1776735108\u0026token=qs4rFmaBgczNPx3apu_ULg\u0026st=0ca319f7a8cc3c8ddc959b5b7a7e9a04" + }, + { + "Text": "mirror", + "URL": 
"http://old.mac.gdn/games/Custom_Spectre_Editor_v1.0_0.sit" + }, + { + "Text": "9ab427270a4bf1580ee625a7d73656bb", + "URL": "http://macintoshgarden.org/arch_md5.php?filename=Custom_Spectre_Editor_v1.0_0.sit" + } + ] + } + ], + "Screenshots": [ + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre1.png", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre_2.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre_3.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_17_0.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_18_0.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_19_0.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_20_0.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_21_0.jpg", + "http://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_22_0.jpg" + ] + } + }, + "https://macintoshgarden.org/games/mac-bzone": { + "fetched_at": "2026-04-21T09:05:05.7251841+10:00", + "software_item": { + "Title": "Mac Bzone", + "URL": "https://macintoshgarden.org/games/mac-bzone", + "Description": "Mac Bzone is an unofficial Macintosh version of the 3D tank battle Battlezone (the ancestor of Spectre). You, the valiant but lone tank commander, vs a slower but infinite enemy. The enemy will always win, in time, but how much time you can buy depends on your skill as a tank commander.\n\nDownload #1: v1.3.1, including readme, encoded as a mar archive\nDownload #2: original upload of 1.3.1, application only.\n\nSerial- 5000 /Code-48406914\n\nMac Bzone requires system 6.0.7 or higher and a 68020+68881 combination or higher (it requires an FPU). 
It will run on black and white systems and will take some advantage of color on color systems (it runs best in 1bit or 4bit modes).", + "Downloads": [ + { + "Title": "MACBZONE_1.3.1.sea_.mac", + "Size": "119.13 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/MACBZONE_1.3.1.sea_.mac?expires=1776637641\u0026token=bVJAb9Hg8rcTw8wGIFSndA\u0026st=0a532012091baa84873e7b834616c87c" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/MACBZONE_1.3.1.sea_.mac" + }, + { + "Text": "8e01dc7db0435e636149f89d172f61cc", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=MACBZONE_1.3.1.sea_.mac" + } + ] + }, + { + "Title": "mac_bzone.sit", + "Size": "88.42 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/mac_bzone_0.sit?expires=1776637642\u0026token=OTn6fdNUutiSv9Et6C3SbQ\u0026st=7542aaeb66d11b4d2c462ffd74c82854" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/mac_bzone_0.sit" + }, + { + "Text": "d4f8287ce87caa1a21ed134d7b795d1c", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=mac_bzone_0.sit" + } + ] + } + ], + "Screenshots": [ + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/mbzone.png" + ] + } + }, + "https://macintoshgarden.org/games/spectre": { + "fetched_at": "2026-04-21T08:09:35.3637366+10:00", + "software_item": { + "Title": "Spectre", + "URL": "https://macintoshgarden.org/games/spectre", + "Description": "Capture the flags and destroy the other tanks in a polygonal 3D world. Similar to the Battlezone vector arcade game in style and gameplay, but with new elements from deathmatch/CTF-style games. 
This first Spectre game was exclusively for Macintosh, though some later iterations for other machines were also simply called \"Spectre.\"\n\n1st download contains DiskDup disk images of an early Beta version, as well as Version 1.0, Spectre Challenger \u0026 v1.0.2 (without \"Challenger\" branding), a World Editor, Cheats, Full Documentation, Product Photos, and Serials — all enclosed in a MacZip \".zip\" archive.\n2nd download Three different Spectre 1.0 applications in a Mar archive which you can unpack on just about every vintage Mac.\n3rd download Spectre 1.1d16 in a StuffIt archive.\n4th download is the World Editor by John Lindal, that lets you edit the world grid and colors.\n4th download is a demo of v1.0b2.\n5th download is a demo of v1.0.\n6th download is the Custom Spectre Editor which gives you speed up to 18, ammunition up to 127, and invincible shields!\n\nRead more at Wikipedia.\n\nSee the sequels: Spectre Supreme and Spectre VR.\n\nSpectre3D.com, homepage of the discontinued Spectre remake for iOS, now hosts a browser version of it in playable alpha state.\n\nMac Plus or later, System 6.0.3 or later, b/w or 16 or 256 colors.\n\nIf extracting the DL #1 zip archive on a classic Mac OS. 
- For best results use MacZip to correctly restore the extracted Resource fork data and Data \u0026 File Type information of the contents.", + "Downloads": [ + { + "Title": "spectre.zip", + "Size": "9.09 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/spectre.zip?expires=1776648516\u0026token=KCvtq4q6h_i6oVW49j6PmA\u0026st=577457eea75029c7b9057937283f3c68" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/spectre.zip" + }, + { + "Text": "022f313a8e9867deedf9dd3407c63f9f", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=spectre.zip" + } + ] + }, + { + "Title": "Spectre_1.0.mar", + "Size": "3.34 MB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/Spectre_1.0.mar?expires=1776648516\u0026token=MtLvLozY4L2g1136s0D-aw\u0026st=f59145f2a074e6c4e3c75cff07b62a03" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/Spectre_1.0.mar" + }, + { + "Text": "09b60f053fc383936dad4d82bc1a0479", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=Spectre_1.0.mar" + } + ] + }, + { + "Title": "Spectre_1.1.sit", + "Size": "765.80 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/Spectre_1.1.sit?expires=1776648516\u0026token=3KxrNa-zwX2uJw3d9bFQYQ\u0026st=98228cb46c4ef8b4c9a37e745f0f79d0" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/Spectre_1.1.sit" + }, + { + "Text": "8f8f95decf1e7caffa638798a302e1e9", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=Spectre_1.1.sit" + } + ] + }, + { + "Title": "Spectre_World_Editor_2.1.sit", + "Size": "74.55 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + 
"Text": "www", + "URL": "https://download.macintoshgarden.org/games/Spectre_World_Editor_2.1.sit?expires=1776648516\u0026token=-WOUcGbR8OOHIe_7LfiF6w\u0026st=d03ea8e27415bd2f98c7a5e76bd0b2ca" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/Spectre_World_Editor_2.1.sit" + }, + { + "Text": "6b5639eca9e21ecde3e80f8a2040d61c", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=Spectre_World_Editor_2.1.sit" + } + ] + }, + { + "Title": "SpectreDemo.sit", + "Size": "253.32 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/SpectreDemo.sit?expires=1776648516\u0026token=63oJsG1pGY1PjbTLpq3z0A\u0026st=f2b0a299fdfb9dd72ec6c3b6059bab8e" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/SpectreDemo.sit" + }, + { + "Text": "5664ead985c70b064f3b83c12dfc5d47", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=SpectreDemo.sit" + } + ] + }, + { + "Title": "Spectre10_demo.sit", + "Size": "242.30 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/Spectre10_demo.sit?expires=1776648516\u0026token=BruWeNJS1ImEsYpz7jumJQ\u0026st=a57f68b85cb857dfcfa0aeb0011b0c80" + }, + { + "Text": "mirror", + "URL": "https://old.mac.gdn/games/Spectre10_demo.sit" + }, + { + "Text": "683427a1569c3270b5868965af991353", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=Spectre10_demo.sit" + } + ] + }, + { + "Title": "Custom_Spectre_Editor_v1.0.sit", + "Size": "21.72 KB", + "OS": "For\n\t\t\t\t\t\t\t\t\t\t\t\tSystem 6.x\t\t\t\t\t\t\t\t\t\t - Mac OS 9", + "Links": [ + { + "Text": "www", + "URL": "https://download.macintoshgarden.org/games/Custom_Spectre_Editor_v1.0_0.sit?expires=1776648516\u0026token=dU33fn2iJSuFrOT3ZiKIWA\u0026st=6eed3cf4895d82eec39240d56c94ce16" + }, + { + "Text": "mirror", + "URL": 
"https://old.mac.gdn/games/Custom_Spectre_Editor_v1.0_0.sit" + }, + { + "Text": "9ab427270a4bf1580ee625a7d73656bb", + "URL": "https://macintoshgarden.org/arch_md5.php?filename=Custom_Spectre_Editor_v1.0_0.sit" + } + ] + } + ], + "Screenshots": [ + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre1.png", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre_2.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/spectre_3.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_17_0.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_18_0.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_19_0.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_20_0.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_21_0.jpg", + "https://macintoshgarden.org/sites/macintoshgarden.org/files/screenshots/s-l1600_22_0.jpg" + ] + } + } +} \ No newline at end of file diff --git a/.github/workflows/pr-ci.yml b/.github/workflows/pr-ci.yml index 77620b2..307806d 100644 --- a/.github/workflows/pr-ci.yml +++ b/.github/workflows/pr-ci.yml @@ -31,6 +31,76 @@ jobs: shell: bash run: bash scripts/ci/test.sh + quality: + name: Quality (vet + race + lint + vuln) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Install libpcap headers + run: | + sudo apt-get update + sudo apt-get install -y libpcap-dev + + - name: go vet + run: go vet ./... + + - name: Race-enabled tests + run: go test -tags all -race -count=1 ./... 
+ + - name: golangci-lint + uses: golangci/golangci-lint-action@v8 + with: + version: latest + args: --build-tags=all + + - name: govulncheck + run: | + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck -tags all ./... + + - name: gosec (untrusted-input paths) + run: | + go install github.com/securego/gosec/v2/cmd/gosec@latest + gosec -tags all ./service/macip/... ./service/macgarden/... ./service/afpfs/macgarden/... + + build-tags: + name: Build-tag matrix + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + tags: + - "" + - "afp" + - "afp macgarden" + - "afp macip" + - "afp macgarden macip" + - "afp sqlite_cnid" + - "all" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Install libpcap headers + run: | + sudo apt-get update + sudo apt-get install -y libpcap-dev + + - name: Build with tags="${{ matrix.tags }}" + run: go build -tags "${{ matrix.tags }}" ./... 
+ build: name: Build (${{ matrix.os }}) runs-on: ${{ matrix.os }} diff --git a/.github/workflows/release-main.yml b/.github/workflows/release-main.yml index 8962e5a..6cdf567 100644 --- a/.github/workflows/release-main.yml +++ b/.github/workflows/release-main.yml @@ -39,34 +39,67 @@ jobs: done < <(bash scripts/ci/compute-release-metadata.sh) build: - name: Build And Package (${{ matrix.os }}) + name: Build And Package (${{ matrix.os }} / ${{ matrix.variant }}) needs: version runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: + # Linux - all - os: ubuntu-latest + variant: all artifact_name: omnitalk-linux archive_name: omnitalk-${{ needs.version.outputs.release_tag }}-linux-amd64.tar.gz build_script: bash scripts/ci/build.sh package_script: bash scripts/ci/package-release.sh target_os: linux output: out/omnitalk + # Linux - router + - os: ubuntu-latest + variant: router + artifact_name: omnitalk-router-linux + archive_name: omnitalk-router-${{ needs.version.outputs.release_tag }}-linux-amd64.tar.gz + build_script: bash scripts/ci/build.sh + package_script: bash scripts/ci/package-release.sh + target_os: linux + output: out/omnitalk-router + # macOS - all - os: macos-latest + variant: all artifact_name: omnitalk-macos archive_name: omnitalk-${{ needs.version.outputs.release_tag }}-macos-amd64.zip build_script: bash scripts/ci/build.sh package_script: bash scripts/ci/package-release.sh target_os: macos output: out/omnitalk + # macOS - router + - os: macos-latest + variant: router + artifact_name: omnitalk-router-macos + archive_name: omnitalk-router-${{ needs.version.outputs.release_tag }}-macos-amd64.zip + build_script: bash scripts/ci/build.sh + package_script: bash scripts/ci/package-release.sh + target_os: macos + output: out/omnitalk-router + # Windows - all - os: windows-latest + variant: all artifact_name: omnitalk-windows archive_name: omnitalk-${{ needs.version.outputs.release_tag }}-windows-amd64.zip build_script: ./scripts/ci/build.ps1 
package_script: ./scripts/ci/package-release.ps1 target_os: windows output: out/omnitalk.exe + # Windows - router + - os: windows-latest + variant: router + artifact_name: omnitalk-router-windows + archive_name: omnitalk-router-${{ needs.version.outputs.release_tag }}-windows-amd64.zip + build_script: ./scripts/ci/build.ps1 + package_script: ./scripts/ci/package-release.ps1 + target_os: windows + output: out/omnitalk-router.exe steps: - name: Checkout uses: actions/checkout@v4 @@ -89,6 +122,7 @@ jobs: BUILD_VERSION: ${{ needs.version.outputs.build_version }} BUILD_COMMIT: ${{ needs.version.outputs.commit_sha }} BUILD_DATE: ${{ needs.version.outputs.build_date }} + BUILD_VARIANT: ${{ matrix.variant }} OUTPUT: ${{ matrix.output }} run: ${{ matrix.build_script }} @@ -99,6 +133,7 @@ jobs: BUILD_VERSION: ${{ needs.version.outputs.build_version }} BUILD_COMMIT: ${{ needs.version.outputs.commit_sha }} BUILD_DATE: ${{ needs.version.outputs.build_date }} + BUILD_VARIANT: ${{ matrix.variant }} OUTPUT: ${{ matrix.output }} run: ${{ matrix.build_script }} @@ -109,6 +144,7 @@ jobs: TARGET_OS: ${{ matrix.target_os }} RELEASE_TAG: ${{ needs.version.outputs.release_tag }} BUILD_VERSION: ${{ needs.version.outputs.build_version }} + BUILD_VARIANT: ${{ matrix.variant }} run: ${{ matrix.package_script }} - name: Package release (Windows) @@ -118,6 +154,7 @@ jobs: TARGET_OS: ${{ matrix.target_os }} RELEASE_TAG: ${{ needs.version.outputs.release_tag }} BUILD_VERSION: ${{ needs.version.outputs.build_version }} + BUILD_VARIANT: ${{ matrix.variant }} run: ${{ matrix.package_script }} - name: Upload build artifact diff --git a/.gitignore b/.gitignore index 353890c..88c9d22 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,6 @@ go.work.sum # Generated by scripts/ci/build.ps1 /cmd/omnitalk/resource.syso /cmd/omnitalk/versioninfo.json + +._htmlcache/ +.macgarden/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..369baac --- /dev/null +++ b/.golangci.yml @@ -0,0 
+1,39 @@ +version: "2" + +run: + build-tags: + - all + +linters: + default: none + enable: + - errcheck + - errorlint + - gocritic + - govet + - ineffassign + - misspell + - revive + - staticcheck + - unused + settings: + errorlint: + errorf: true + asserts: true + comparison: true + revive: + rules: + - name: var-naming + - name: package-comments + - name: exported + disabled: true + gocritic: + disabled-checks: + - ifElseChain + - singleCaseSwitch + exclusions: + rules: + - path: _test\.go$ + linters: + - errcheck + - revive diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 0000000..9c13d55 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,223 @@ +# OmniTalk Architecture + +OmniTalk is a Go AppleTalk Phase 2 router and AFP file server. It bridges +legacy Apple networking protocols to modern environments — EtherTalk +(raw Ethernet), LToUDP (multicast UDP), TashTalk (serial), and +virtual LocalTalk transports — and serves AFP volumes over both the +classic ASP/ATP/DDP stack and modern DSI/TCP. + +This document is the entry point for contributors. Read it once and +you should know where any piece of code lives, why, and what it can +import. 
+ +## Module map + +``` +cmd/omnitalk/ wiring only — flag/TOML parsing, service registration +config/ single typed config tree; TOML loader, validation +protocol/ wire format only (codec + constants, zero I/O) + ddp/ DDP datagram + MacRoman codec + (atp, asp, zip, rtmp, aep, llap, nbp to follow) +port/ link-layer transports (Port + RawLink) + ethertalk/ raw Ethernet via libpcap/Npcap, AARP + localtalk/ LocalTalk + LToUDP/TashTalk/Virtual backends + rawlink/ generic raw L2 link abstraction + nat/ OS-stack NAT helper (used by macip) +router/ Router, RoutingTable, ZoneInformationTable +service/ stateful services; compose protocol + port + afp/ Apple Filing Protocol server + asp/ dsi/ AFP transports (classic and modern) + atp/ AppleTalk Transaction Protocol + zip/ Zone Information Protocol + rtmp/ Routing Table Maintenance Protocol + aep/ AppleTalk Echo Protocol + llap/ LocalTalk Link Access Protocol + macip/ IP-over-AppleTalk gateway with NAT and DHCP relay + macgarden/ Macintosh Garden HTTP client (used by macgarden VFS) +pkg/ reusable, AppleTalk-agnostic + binutil/ allocation-free wire codec helpers, Wire interface + appledouble/ AppleDouble v2 sidecar format (parse/build) + cnid/ AFP Catalog Node IDs (memory + SQLite stores) + logging/ slog factory: handler config, level parsing + telemetry/ Counter/Gauge/Histogram via expvar (otel build tag) +netlog/ project logging API — Debug/Info/Warn facade over slog +spec/ Apple protocol references (read this when touching wire code) +``` + +## Layering rules + +``` +cmd → service → (protocol | port | pkg) + ↓ ↓ + (no I/O) (port-side) +``` + +- `protocol/*` has zero I/O, zero goroutines, zero state. Pure + encode/decode and constants. Cite the relevant `spec/` document in + the package doc comment. +- `port/*` owns the link layer. It knows about frames and addresses, + not about higher protocols. +- `service/*` owns sockets, sessions, and state machines. It composes + `protocol` codecs over `port` transports. 
+- `pkg/*` is reusable outside OmniTalk. It must not import anything + under `service/`, `port/`, `cmd/`, or `router/`. +- `internal/*` is private to OmniTalk. Mocks and shared test harness + live here. +- `cmd/omnitalk/` does no business logic. It parses configuration + and wires services together. + +## Core interfaces + +| Interface | Where | Purpose | +|---|---|---| +| `port.Port` | [port/port.go](port/port.go) | Unicast/Broadcast/Multicast frame transport | +| `port.BridgeConfigurable` | [port/port.go](port/port.go) | Optional bridge-mode and host-MAC knobs | +| `port/localtalk.FrameSender` | [port/localtalk/localtalk.go](port/localtalk/localtalk.go) | Backend hook for LocalTalk variants | +| `port/rawlink.RawLink` | [port/rawlink/](port/rawlink/) | Raw L2 read/write — used by EtherTalk and MacIP | +| `service.Service` | [service/service.go](service/service.go) | Object plugged into the router by socket | +| `service.Router` | [service/service.go](service/service.go) | What services see of the router | +| `afp.FileSystem` | [service/afp/fs.go](service/afp/fs.go) | Pluggable AFP volume backend | +| `cnid.Store` | [pkg/cnid/cnid.go](pkg/cnid/cnid.go) | Catalog Node ID persistence | +| `binutil.Wire` (canonical shape) | [pkg/binutil/binutil.go](pkg/binutil/binutil.go) | `MarshalWire`/`UnmarshalWire`/`WireSize` | + +## Configuration + +Single typed tree in `config/`. Two loaders feed it: + +1. TOML — `config.Load(path)` parses `server.toml` via `knadh/koanf` + with the `pelletier/go-toml` v2 parser. +2. Flags — `cmd/omnitalk/main.go` overlays CLI flags on top of the + file defaults. + +`config.Root.Validate()` runs once before services start. Services +receive typed subtrees at construction time. Construction options +are immutable: ports do not mutate themselves after `Start()`. + +## Logging and telemetry + +OmniTalk has two logging packages with distinct jobs: + +- **`netlog/`** is the call-site API. 
Services and ports use + `netlog.Debug`, `netlog.Info`, `netlog.Warn`. The facade keeps call + sites short (no per-package `*slog.Logger` plumbing) while still + routing through whatever structured handler `cmd/omnitalk` installs. +- **`pkg/logging/`** is the slog factory used once at startup. + `cmd/omnitalk` calls `logging.New("OmniTalk", ...)` to build a + `*slog.Logger` with the configured handler (console, JSON, or both) + and installs it via `netlog.SetLogger`. Use this directly only when + you need a `*slog.Logger` value — e.g. attaching structured fields + with `.With` for the lifetime of an object. + +Sources are tagged in two complementary ways: messages carry a +`[AFP]` / `[ASP]` / `[EtherTalk]` prefix that grep finds in either +format, and the slog handler stamps every record with a `source` +attribute that JSON consumers can filter on. + +Stdlib `log.Printf` and `log.Fatal` are not used inside library code. +`cmd/omnitalk/main.go` uses `log.Fatal*` only for unrecoverable startup +errors before any logger is wired. + +Telemetry is `pkg/telemetry`, separate from logs. Default backend is +`expvar` (stdlib, zero deps). Initial counters: +- `omnitalk_router_frames_in_total` +- `omnitalk_afp_commands_total` +- `omnitalk_aarp_probe_retries_total` + +A future `//go:build otel` file will swap in an OpenTelemetry backend +without touching call sites. + +## Wire codec convention + +The canonical shape lives in [pkg/binutil/binutil.go](pkg/binutil/binutil.go): + +```go +type Wire interface { + MarshalWire(b []byte) (n int, err error) // append-style, no alloc + UnmarshalWire(b []byte) (n int, err error) + WireSize() int +} +``` + +Implementations live alongside their model types. `pkg/binutil` provides +allocation-free `PutU8/16/32/64`, `GetU8/16/32/64`, and Pascal-string +helpers. Errors: +- `binutil.ErrShortBuffer` for buffer-too-small. +- `binutil.ErrMalformed` for invalid prefixes / enum values. 
+ +Migrated so far: ASP `WriteContinuePacket`, ATP `ATPHeader`, DSI `Header`. +Other wire models still use raw `binary.BigEndian` calls; migration +proceeds one type per commit with golden hex round-trip tests. + +## Timer and retry patterns + +OmniTalk does not use exponential backoff. The protocols predate it. +Three canonical shapes: + +1. **Reliable-delivery retransmits** (ATP-style). Per-transaction + `retryTimeout` + `retriesLeft` counter, an injectable `Clock.AfterFunc` + so tests control time. Exemplar: `service/atp/transaction.go`. +2. **Periodic polling** (AARP probe, AMT aging, routing-table aging). + `time.NewTicker` from a goroutine that selects on `<-ctx.Done()` + (or `<-stop`). The tick cadence *is* the policy. Exemplar: + `port/ethertalk/ethertalk.go:acquireAddressRun`. +3. **One-shot waits** (LocalTalk CTS response, DSI request/reply). + `time.NewTimer` + `select { case <-timer.C: ...; case <-resp: ... }`. + +If a future consumer genuinely needs exponential backoff, extract it +then. Don't speculate. + +## AFP architecture + +AFP supports two transport stacks simultaneously: +- **Classic**: DDP → ATP → ASP → AFP +- **Modern**: TCP → DSI → AFP + +Both deliver into a shared `afp.CommandHandler`. Today that handler is +the 525-line switch in [service/afp/server.go](service/afp/server.go). +A future commit decomposes it into a registry of per-command handlers +under `service/afp/commands/`. + +AppleDouble metadata is stored either as `._filename` sidecars or in +`.appledouble/` folders (Netatalk-compatible). The sidecar **format** +lives in [pkg/appledouble](pkg/appledouble/); the AFP-specific +`ForkMetadataBackend` (which talks to the host filesystem) stays in +`service/afp/`. + +CNID tracking goes through [pkg/cnid](pkg/cnid/) with two backends: +in-memory (default for tests) and SQLite (`modernc.org/sqlite`, +default for production). Each volume gets its own `cnid.Store`. 
+ +## File system backends + +`service/afp` defines `FileSystem` (see [service/afp/fs.go](service/afp/fs.go)). +The shipped backend is `LocalFileSystem`. A `macgarden_fs.go` backend +exists alongside it; a future commit relocates it to +`service/afpfs/macgarden/` behind `//go:build macgarden`, registered +through a factory map in `afp` so adding new backends does not modify +the core package. + +## Spec references + +The `spec/` directory contains 14 markdown documents describing the +protocols this codebase implements. Start with `spec/00-overview.md` +for DDP socket assignments and service interface contracts before +modifying router or service code. PRs touching protocol semantics +must cite the relevant section. + +## Glossary + +- **DDP**: Datagram Delivery Protocol. AppleTalk's network layer. +- **ATP**: AppleTalk Transaction Protocol. Reliable request/response. +- **ASP**: AppleTalk Session Protocol. Sessions over ATP. +- **DSI**: Data Stream Interface. AFP transport over TCP. +- **ZIP**: Zone Information Protocol. +- **RTMP**: Routing Table Maintenance Protocol. +- **AEP**: AppleTalk Echo Protocol. +- **NBP**: Name Binding Protocol. +- **AFP**: Apple Filing Protocol. +- **CNID**: Catalog Node ID. AFP's persistent file/directory identifier. +- **AppleDouble**: Sidecar format for storing resource forks and Finder + metadata on non-HFS filesystems. +- **AARP**: AppleTalk Address Resolution Protocol (Ethernet-side). +- **LLAP**: LocalTalk Link Access Protocol. +- **MacIP**: IP-over-AppleTalk gateway protocol. diff --git a/CLAUDE.md b/CLAUDE.md index b3562b3..21f8b17 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co OmniTalk is a Go-based AppleTalk Phase 2 router and AFP file server. It bridges legacy Apple networking protocols to modern environments, supporting EtherTalk (raw Ethernet), LToUDP (multicast UDP), TashTalk (serial), and virtual LocalTalk transports. 
-**Module:** `github.com/pgodw/omnitalk/go` +**Module:** `github.com/pgodw/omnitalk` **Go version:** 1.23.0 ## Commands @@ -21,8 +21,8 @@ go test ./... # Run tests for a specific package go test ./service/afp/... -# Run with INI config -./omnitalk # auto-loads server.ini if present +# Run with TOML config +./omnitalk # auto-loads server.toml if present # Run with flags (see README.md for full list) ./omnitalk -ethertalk eth0 -zone "MyZone" @@ -36,7 +36,7 @@ go test ./service/afp/... cmd/omnitalk/main.go → Ports → Router → Services ``` -1. **Entry point** (`cmd/omnitalk/`) parses CLI flags and `server.ini`, constructs ports, wires them to the router, and starts services. +1. **Entry point** (`cmd/omnitalk/`) parses CLI flags and `server.toml`, constructs ports, wires them to the router, and starts services. 2. **Router** (`router/`) receives DDP datagrams from all ports, maintains the `RoutingTable` and `ZoneInformationTable`, and dispatches to services by socket number or forwards to other ports. 3. **Ports** (`port/`) abstract network interfaces. All implement `port.Port` (Unicast/Broadcast/Multicast). Implementations: `ethertalk`, `localtalk/ltoudp`, `localtalk/tashtalk`, `localtalk/virtual`. 4. **Services** (`service/`) plug into the router by registering socket numbers. Each implements `service.Service`. @@ -68,7 +68,7 @@ AppleDouble metadata is stored either as `._filename` sidecars or in `.appledoub ### Configuration -Copy `server.ini.example` to `server.ini`. Sections: `[LToUdp]`, `[TashTalk]`, `[EtherTalk]`, `[MacIP]`, `[AFP]`, `[Volumes.*]`, `[Logging]`. File extension→type/creator mappings live in `extmap.conf` (Netatalk-compatible format). +Copy `server.toml.example` to `server.toml`. Format is TOML (parsed via `knadh/koanf` + `pelletier/go-toml`). Sections: `[LToUdp]`, `[TashTalk]`, `[EtherTalk]`, `[MacIP]`, `[AFP]`, `[Volumes.*]`, `[Logging]`. File extension→type/creator mappings live in `extmap.conf` (Netatalk-compatible format). 
### Protocol Specifications diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..10ce0be --- /dev/null +++ b/Makefile @@ -0,0 +1,34 @@ +TAGS ?= all + +.PHONY: build test test-race test-tags lint vuln gosec fuzz clean + +build: + go build -tags "$(TAGS)" -o omnitalk ./cmd/omnitalk + +test: + go test -tags "$(TAGS)" ./... + +test-race: + go test -tags "$(TAGS)" -race -count=1 ./... + +test-tags: + bash scripts/ci/test.sh + +lint: + golangci-lint run --build-tags=all --timeout=5m + +vuln: + govulncheck -tags all ./... + +gosec: + gosec -tags all ./service/macip/... ./service/macgarden/... ./service/afpfs/macgarden/... + +fuzz: + @for dir in protocol/ddp protocol/atp protocol/asp protocol/nbp protocol/llap; do \ + echo "=== fuzz $$dir ==="; \ + go test -tags all -run=^$$ -fuzz=. -fuzztime=20s ./$$dir/... || exit 1; \ + done + +clean: + rm -f omnitalk omnitalk.exe + rm -rf out dist diff --git a/README.md b/README.md index 5973f1e..e250338 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,12 @@ +## Architecture + +For a guided tour of the codebase — package layout, layering rules, +core interfaces, logging/telemetry, and the AFP design — see +[ARCHITECTURE.md](ARCHITECTURE.md). + ## Features - Cross Platform Support: runs on Windows, MacOS and Linux. @@ -23,24 +29,24 @@ ## Quick start -- Copy server.ini.example to server.ini and edit values. -- Run OmniTalk with no flags to auto-load server.ini. +- Copy server.toml.example to server.toml and edit values. +- Run OmniTalk with no flags to auto-load server.toml. - Or pass a config file explicitly with -config. Examples: ~~~bash -./omnitalk -config server.ini +./omnitalk -config server.toml ~~~ ~~~powershell -.\omnitalk.exe -config server.ini +.\omnitalk.exe -config server.toml ~~~ Config-loading rule: - -config cannot be combined with other flags. -- If no flags are supplied, OmniTalk auto-loads server.ini if present. +- If no flags are supplied, OmniTalk auto-loads server.toml if present. 
--- @@ -147,18 +153,18 @@ Tip: [install Npcap](https://npcap.com/#download) first, otherwise pcap devices ### Example interface configs (Linux, macOS, Windows) -These examples show only relevant keys; merge into your full server.ini. +These examples show only relevant keys; merge into your full server.toml. Linux example: -~~~ini +~~~toml [LToUdp] enabled = true -interface = 192.168.1.10 +interface = "192.168.1.10" [EtherTalk] -backend = pcap -device = eth0 +backend = "pcap" +device = "eth0" hw_address = "DE:AD:BE:EF:CA:FE" seed_network_min = 3 seed_network_max = 5 @@ -167,14 +173,14 @@ seed_zone = "EtherTalk Network" macOS example: -~~~ini +~~~toml [LToUdp] enabled = true -interface = 192.168.1.20 +interface = "192.168.1.20" [EtherTalk] -backend = pcap -device = en0 +backend = "pcap" +device = "en0" hw_address = "DE:AD:BE:EF:CA:FE" seed_network_min = 3 seed_network_max = 5 @@ -183,16 +189,18 @@ seed_zone = "EtherTalk Network" Windows example: -~~~ini +~~~toml [LToUdp] enabled = true -interface = 0.0.0.0 +interface = "0.0.0.0" +# On Windows, use TOML literal strings (single quotes) so backslashes are not +# interpreted as escapes by the parser. [EtherTalk] -backend = pcap -device = "\Device\NPF_{1DFDAA9C-7DD4-40F8-B6D4-9298C273D654}" +backend = "pcap" +device = '\Device\NPF_{1DFDAA9C-7DD4-40F8-B6D4-9298C273D654}' hw_address = "DE:AD:BE:EF:CA:FE" -bridge_mode = auto +bridge_mode = "auto" seed_network_min = 3 seed_network_max = 5 seed_zone = "EtherTalk Network" @@ -278,17 +286,17 @@ Provide IP connectivity to AppleTalk clients via a MacIP gateway. 
Example NAT-oriented configuration: -~~~ini +~~~toml [MacIP] enabled = true -mode = nat +mode = "nat" zone = "EtherTalk Network" -nat_subnet = 192.168.100.0/24 -nat_gw = 192.168.100.1 -ip_gateway = 192.168.1.1 -nameserver = 192.168.1.1 +nat_subnet = "192.168.100.0/24" +nat_gw = "192.168.100.1" +ip_gateway = "192.168.1.1" +nameserver = "192.168.1.1" dhcp_relay = false -lease_file = leases.txt +lease_file = "leases.txt" ~~~ ### [MacIP] @@ -343,6 +351,8 @@ Unsupported or limited: ### [AFP] +These keys are server-wide; per-volume options live in `[Volumes.]` (see below). + | Key | Type | Default | Description | |---|---|---|---| | enabled | bool | true | Enables AFP service. | @@ -350,7 +360,12 @@ Unsupported or limited: | zone | string | (empty) | Zone for AFP registration. Empty uses router-selected default. | | protocols | string | tcp,ddp | Enabled AFP transports: tcp, ddp, or both comma-separated. | | binding | string | :548 | TCP listen address for DSI AFP. | -| extension_map | string | (empty) | Path to Netatalk-compatible extension map file. Relative paths are resolved from INI directory. | +| extension_map | string | (empty) | Path to Netatalk-compatible extension map file. Relative paths are resolved from the config file's directory. | +| use_decomposed_names | bool | true | Encode host-reserved filename characters as `0xNN` tokens in AFP mapping. Server-wide. | +| cnid_backend | string | sqlite | CNID backend used by all volumes: `sqlite` (when built with the `sqlite_cnid` or `all` tag) or `memory`. | +| desktop_backend | string | sqlite | Backend for the AFP desktop database (icons, APPL mappings, comments). | +| appledouble_mode | string | modern | Default metadata layout: `modern` (`._` sidecars) or `legacy` (`.AppleDouble/` directories). Volumes may override. | +| persistent_volume_ids | bool | true | Persist per-volume IDs across restarts so clients keep their aliases. 
| #### Filename mapping and encoding @@ -364,12 +379,12 @@ Behavior: Use `[AFP] extension_map` to provide Macintosh type/creator metadata for files based on extension. -Example in `server.ini`: +Example in `server.toml`: -~~~ini +~~~toml [AFP] enabled = true -extension_map = extmap.conf +extension_map = "extmap.conf" ~~~ Format rules: @@ -399,18 +414,19 @@ Notes: ### [Volumes.] -Each volume is configured as a separate `[Volumes.]` section. +Each volume is configured as a separate `[Volumes.]` section. The section suffix is used as the volume name unless `name` is set. + +> Note: `cnid_backend`, `use_decomposed_names`, and the default `appledouble_mode` are server-wide settings under `[AFP]` — they are not configurable per volume. A volume may override `appledouble_mode` to choose a sidecar layout that differs from the server default. | Key | Type | Default | Description | |---|---|---|---| | name | string | section suffix | Display name for the AFP volume (max 31 chars recommended). | -| path | string | none (required) | Host filesystem path to export. | +| path | string | required (except for `macgarden`) | Host filesystem path to export. For `fs_type = "macgarden"` a default path is derived from `name` if omitted. | +| fs_type | string | local_fs | Filesystem backend: `local_fs` (host disk) or `macgarden` (read-only virtual Macintosh Garden view, requires the `macgarden` or `all` build tag). | +| password | string | (empty) | Optional volume password. The internal cleartext-password path exists in code but is not exposed via the live authentication flow today. | | read_only | bool | false | Exports the volume as read-only at AFP protocol level. | -| cnid_backend | string | sqlite | CNID backend; currently sqlite or memory depending on build/runtime support. Must not conflict across volumes. | -| use_decomposed_names | bool | true | Encodes host-reserved filename characters as 0xNN tokens in AFP mapping. Must not conflict across volumes. 
| -| fork_backend | string | (blank/AppleDouble) | Currently only AppleDouble is accepted when set. | -| appledouble_mode | string | modern | Metadata layout mode: modern (._ sidecars) or legacy (.appledouble directory style). | | rebuild_desktop_db | bool | false | Rebuilds AFP desktop database from resource fork metadata at startup. | +| appledouble_mode | string | inherits `[AFP] appledouble_mode` | Per-volume override of the metadata layout: `modern` (`._` sidecars) or `legacy` (`.AppleDouble/` directories). | #### Read-only volume behavior @@ -428,9 +444,9 @@ Error code behavior by AFP version: Example: -~~~ini +~~~toml [Volumes.Sample] -path = dist/Sample Volume +path = "dist/Sample Volume" read_only = true ~~~ @@ -441,17 +457,18 @@ Volume naming: #### Sidecar metadata -- `fork_backend` currently accepts AppleDouble storage. +- AppleDouble is the only resource-fork/metadata storage backend. - `appledouble_mode=modern` uses `._filename` sidecars beside files. - `appledouble_mode=legacy` uses `.AppleDouble/filename` sidecars. -- `rebuild_desktop_db=true` rebuilds desktop metadata cache at startup. +- The default mode comes from `[AFP] appledouble_mode`; individual volumes may override it. +- `rebuild_desktop_db=true` (per volume) rebuilds desktop metadata cache at startup. #### Netatalk compatibility - Compatible formats: Netatalk-style extension map syntax and AppleDouble modern/legacy sidecar layouts. - Known differences: CNID database implementation is OmniTalk-specific (sqlite or memory), not a drop-in Netatalk CNID store. - OmniTalk does not currently provide a Netatalk-style extended-attribute metadata backend. -- AFP feature coverage is practical but incomplete (for example catalog search is unsupported). +- AFP feature coverage is practical but incomplete (for example catalog search is currently implemented as name-based search and backend-dependent). 
### [Logging] @@ -472,11 +489,11 @@ Common operational flags: - -parse-packets and -parse-output - -afp-volume (repeatable Name:Path) -Use server.ini for repeatable deployments; use flags for quick experiments. +Use server.toml for repeatable deployments; use flags for quick experiments. ## Rough project layout -- cmd/omnitalk: entrypoint, flag handling, INI loading, runtime wiring. +- cmd/omnitalk: entrypoint, flag handling, TOML config loading, runtime wiring. - router: datagram dispatch, routing table, zone information table. - port: transport implementations (EtherTalk, LocalTalk variants, rawlink, NAT helpers). - service: protocol/application services (AEP, RTMP, ZIP, ASP/ATP/DSI, AFP, MacIP, LLAP). diff --git a/appletalk/datagram.go b/appletalk/datagram.go deleted file mode 100644 index 6d4e694..0000000 --- a/appletalk/datagram.go +++ /dev/null @@ -1,17 +0,0 @@ -package appletalk - -import "github.com/pgodw/omnitalk/go/protocol/ddp" - -const MaxDataLength = ddp.MaxDataLength - -type Datagram = ddp.Datagram - -func DDPChecksum(data []byte) uint16 { return ddp.Checksum(data) } - -func DatagramFromLongHeaderBytes(data []byte, verifyChecksum bool) (Datagram, error) { - return ddp.DatagramFromLongHeaderBytes(data, verifyChecksum) -} - -func DatagramFromShortHeaderBytes(destinationNode, sourceNode uint8, data []byte) (Datagram, error) { - return ddp.DatagramFromShortHeaderBytes(destinationNode, sourceNode, data) -} diff --git a/appletalk/macroman.go b/appletalk/macroman.go deleted file mode 100644 index e799bd1..0000000 --- a/appletalk/macroman.go +++ /dev/null @@ -1,11 +0,0 @@ -package appletalk - -import "github.com/pgodw/omnitalk/go/encoding" - -func MacRomanToUpper(b []byte) []byte { return encoding.MacRomanToUpper(b) } - -func MacRomanToLower(b []byte) []byte { return encoding.MacRomanToLower(b) } - -func MacRomanToUTF8(b []byte) string { return encoding.MacRomanToUTF8(b) } - -func UTF8ToMacRoman(s string) []byte { return encoding.UTF8ToMacRoman(s) } diff 
--git a/appletalk/packet.go b/appletalk/packet.go deleted file mode 100644 index 4b3f03b..0000000 --- a/appletalk/packet.go +++ /dev/null @@ -1,9 +0,0 @@ -package appletalk - -// Packet is a generic protocol packet contract used by protocol layers that -// support binary wire encoding/decoding and structured log formatting. -type Packet interface { - String() string - Marshal() []byte - Unmarshal(data []byte) error -} diff --git a/cmd/omnitalk/afp_disabled.go b/cmd/omnitalk/afp_disabled.go new file mode 100644 index 0000000..cd74146 --- /dev/null +++ b/cmd/omnitalk/afp_disabled.go @@ -0,0 +1,27 @@ +//go:build !afp && !all + +package main + +import ( + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/service" +) + +type afpHookDisabled struct{} + +func (afpHookDisabled) Services() []service.Service { return nil } +func (afpHookDisabled) AttachMacIP(_ AFPSessionHooks) {} + +// wireAFP is the no-op stub used when the binary is built without the +// afp tag. It logs a warning if the operator asked for AFP and returns +// a nil hook so the rest of main.go skips AFP wiring. 
+func wireAFP(in AFPWiring) (AFPHook, error) { + if in.FromConfig && in.Source.K != nil && in.Source.K.Exists("AFP") { + netlog.Warn("[MAIN][AFP] [AFP] section present in config but binary was built without -tags afp; ignoring") + } else if !in.FromConfig { + if len(in.Flags.VolumeFlagValues) > 0 || in.Flags.ExtensionMap != "" { + netlog.Warn("[MAIN][AFP] -afp-* flags set but binary was built without -tags afp; ignoring") + } + } + return afpHookDisabled{}, nil +} diff --git a/cmd/omnitalk/afp_enabled.go b/cmd/omnitalk/afp_enabled.go new file mode 100644 index 0000000..d4268a9 --- /dev/null +++ b/cmd/omnitalk/afp_enabled.go @@ -0,0 +1,176 @@ +//go:build afp || all + +package main + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/pgodw/omnitalk/config" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/afp" + "github.com/pgodw/omnitalk/service/asp" + "github.com/pgodw/omnitalk/service/dsi" +) + +type afpHookEnabled struct { + services []service.Service + asp *asp.Service // nil when DDP transport disabled +} + +func (h *afpHookEnabled) Services() []service.Service { return h.services } + +func (h *afpHookEnabled) AttachMacIP(hooks AFPSessionHooks) { + if h == nil || h.asp == nil || hooks == nil { + return + } + h.asp.SetSessionLifecycleHooks( + func(sess *asp.Session) { hooks.OnOpen(sess.WSNet, sess.WSNode, sess.ID) }, + func(sess *asp.Session) { hooks.OnClose(sess.ID) }, + func(sess *asp.Session) { hooks.OnActivity(sess.ID) }, + ) +} + +// wireAFP builds the AFP file server, its transports (ASP over DDP and +// DSI over TCP), and returns a hook the rest of main.go uses to attach +// the resulting services to the router. 
+func wireAFP(in AFPWiring) (AFPHook, error) { + cfg := afp.DefaultConfig() + if in.FromConfig { + if err := loadAFPSection(in.Source, &cfg); err != nil { + return nil, err + } + } else { + applyAFPFlagsToConfig(in.Flags, &cfg) + } + + if !cfg.Enabled || len(cfg.Volumes) == 0 { + return &afpHookEnabled{}, nil + } + + vols, err := cfg.ResolvedVolumes() + if err != nil { + return nil, fmt.Errorf("AFP volume config: %w", err) + } + + var extMap *afp.ExtensionMap + if cfg.ExtensionMap != "" { + loaded, err := loadAFPExtensionMap(cfg.ExtensionMap) + if err != nil { + return nil, fmt.Errorf("failed loading AFP extension map %q: %w", cfg.ExtensionMap, err) + } + extMap = loaded + } + + hasDDP, hasTCP := splitAFPProtocols(cfg.Protocols) + + hook := &afpHookEnabled{} + var transports []afp.Transport + + if hasDDP { + aspSvc := asp.New(cfg.Name, nil, in.NBP, []byte(cfg.Zone)) + hook.asp = aspSvc + transports = append(transports, aspSvc) + netlog.Info("[MAIN][AFP] enabled DDP transport on socket %d", asp.ServerSocket) + } + + if hasTCP { + dsiSvc := dsi.NewServer(cfg.Name, cfg.Binding, nil) + transports = append(transports, dsiSvc) + netlog.Info("[MAIN][AFP] enabled TCP transport on %s", cfg.Binding) + } + + mode, err := afp.ParseAppleDoubleMode(cfg.AppleDoubleMode) + if err != nil { + return nil, fmt.Errorf("AFP: %w", err) + } + afpSvc := afp.NewService( + cfg.Name, + vols, + nil, + transports, + afp.Options{ + DecomposedFilenames: cfg.UseDecomposedNames, + CNIDBackend: cfg.CNIDBackend, + AppleDoubleMode: mode, + ExtensionMap: extMap, + PersistentVolumeIDs: cfg.PersistentVolumeIDs, + }, + ) + for _, t := range transports { + switch transport := t.(type) { + case *asp.Service: + transport.SetCommandHandler(afpSvc) + case *dsi.Server: + transport.SetCommandHandler(afpSvc) + } + } + + hook.services = append(hook.services, afpSvc) + netlog.Info("[MAIN][AFP] server=%q volumes=%d zone=%q protocols=%q", cfg.Name, len(vols), cfg.Zone, cfg.Protocols) + return hook, nil +} + +// 
loadAFPSection unmarshals [AFP] into cfg, validates it, and resolves +// a relative extension_map path against the config-file directory. +func loadAFPSection(src config.Source, cfg *afp.Config) error { + if err := loadSection(src.K, "AFP", cfg); err != nil { + return err + } + if cfg.ExtensionMap != "" && !filepath.IsAbs(cfg.ExtensionMap) && src.ConfigDir != "" { + cfg.ExtensionMap = filepath.Join(src.ConfigDir, cfg.ExtensionMap) + } + if !cfg.Enabled { + cfg.Volumes = nil + } + return nil +} + +func applyAFPFlagsToConfig(f AFPFlagInputs, cfg *afp.Config) { + if f.ServerName != "" { + cfg.Name = f.ServerName + } + cfg.Zone = f.Zone + if f.Protocols != "" { + cfg.Protocols = f.Protocols + } + if f.TCPAddr != "" { + cfg.Binding = f.TCPAddr + } + cfg.ExtensionMap = f.ExtensionMap + cfg.UseDecomposedNames = f.DecomposedNames + if f.CNIDBackend != "" { + cfg.CNIDBackend = f.CNIDBackend + } + if f.AppleDoubleMode != "" { + cfg.AppleDoubleMode = f.AppleDoubleMode + } + if len(f.VolumeFlagValues) == 0 { + return + } + if cfg.Volumes == nil { + cfg.Volumes = make(map[string]afp.VolumeConfig) + } + for _, raw := range f.VolumeFlagValues { + v, err := afp.ParseVolumeFlag(raw) + if err != nil { + netlog.Warn("[MAIN][AFP] %v", err) + continue + } + cfg.Volumes[v.Name] = v + } +} + +func splitAFPProtocols(s string) (ddp, tcp bool) { + for _, p := range strings.Split(s, ",") { + switch strings.ToLower(strings.TrimSpace(p)) { + case "ddp": + ddp = true + case "tcp": + tcp = true + } + } + return +} diff --git a/cmd/omnitalk/afp_hook.go b/cmd/omnitalk/afp_hook.go new file mode 100644 index 0000000..bbbc551 --- /dev/null +++ b/cmd/omnitalk/afp_hook.go @@ -0,0 +1,52 @@ +package main + +import ( + "github.com/pgodw/omnitalk/config" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/zip" +) + +// AFPHook is the cmd-layer abstraction over the optional AFP file +// server (and its ASP/DSI transports). 
The real implementation lives +// behind //go:build afp; the disabled stub returns a nil hook so +// router-only builds compile without pulling in the AFP subsystem. +type AFPHook interface { + // Services returns the services to register with the router. + Services() []service.Service + // AttachMacIP wires AFP's ASP session lifecycle to MacIP DHCP lease + // pinning. No-op when AFP runs DSI-only or MacIP is not built. + AttachMacIP(hooks AFPSessionHooks) +} + +// AFPSessionHooks bridges ASP session lifecycle events to MacIP without +// exposing service/asp at the cmd-neutral layer. +type AFPSessionHooks interface { + OnOpen(net uint16, node, sessID uint8) + OnClose(sessID uint8) + OnActivity(sessID uint8) +} + +// AFPFlagInputs collects the flag values required to build AFP when no +// TOML config file is in use. When -config is given, flagInputs is +// ignored and AFP reads its section from the config.Source instead. +type AFPFlagInputs struct { + ServerName string + Zone string + Protocols string + TCPAddr string + ExtensionMap string + DecomposedNames bool + CNIDBackend string + AppleDoubleMode string + VolumeFlagValues []string // raw "Name:Path" flag entries +} + +// AFPWiring is the input bundle for wireAFP. +type AFPWiring struct { + // Source is the loaded TOML, when -config was used. Zero value + // (Source{}) signals flag-only configuration. 
+ Source config.Source + FromConfig bool + Flags AFPFlagInputs + NBP *zip.NameInformationService +} diff --git a/cmd/omnitalk/config_afp_test.go b/cmd/omnitalk/config_afp_test.go new file mode 100644 index 0000000..081ec71 --- /dev/null +++ b/cmd/omnitalk/config_afp_test.go @@ -0,0 +1,213 @@ +//go:build afp || all + +package main + +import ( + "os" + "path/filepath" + "testing" + + "github.com/pgodw/omnitalk/config" + "github.com/pgodw/omnitalk/service/afp" +) + +// loadAFPForTest is a small helper that mirrors what wireAFP does on +// the config-file path: load the TOML source and unmarshal [AFP] into +// an afp.Config, applying the same path resolution. +func loadAFPForTest(t *testing.T, path string) afp.Config { + t.Helper() + src, err := config.Load(path) + if err != nil { + t.Fatalf("config.Load: %v", err) + } + cfg := afp.DefaultConfig() + if err := loadAFPSection(src, &cfg); err != nil { + t.Fatalf("loadAFPSection: %v", err) + } + return cfg +} + +func TestLoadAFPConfig_VolumesAndExtensionMap(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP] +enabled = true +name = "OmniTalk" +zone = "EtherTalk Network" +protocols = "ddp,tcp" +binding = ":548" +extension_map = "extmap.conf" +cnid_backend = "memory" +use_decomposed_names = true + +[AFP.Volumes.Main] +name = "Main" +path = 'C:\Mac' +appledouble_mode = "legacy" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + + cfg := loadAFPForTest(t, cfgPath) + if cfg.ExtensionMap != filepath.Join(dir, "extmap.conf") { + t.Fatalf("ExtensionMap = %q, want %q", cfg.ExtensionMap, filepath.Join(dir, "extmap.conf")) + } + if cfg.CNIDBackend != "memory" { + t.Fatalf("CNIDBackend = %q", cfg.CNIDBackend) + } + if !cfg.UseDecomposedNames { + t.Fatal("UseDecomposedNames = false") + } + vols, err := cfg.ResolvedVolumes() + if err != nil { + t.Fatalf("ResolvedVolumes: %v", err) + } + if len(vols) != 1 || vols[0].Path 
!= `C:\Mac` { + t.Fatalf("unexpected volumes: %#v", vols) + } + if vols[0].AppleDoubleMode != afp.AppleDoubleModeLegacy { + t.Fatalf("AppleDoubleMode = %q", vols[0].AppleDoubleMode) + } +} + +func TestLoadAFPConfig_PerVolumeAppleDoubleMode(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP.Volumes.Modern] +name = "Modern" +path = "/tmp/modern" +appledouble_mode = "modern" + +[AFP.Volumes.Legacy] +name = "Legacy" +path = "/tmp/legacy" +appledouble_mode = "legacy" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + + cfg := loadAFPForTest(t, cfgPath) + vols, err := cfg.ResolvedVolumes() + if err != nil { + t.Fatalf("ResolvedVolumes: %v", err) + } + if len(vols) != 2 { + t.Fatalf("want 2 vols, got %d", len(vols)) + } + byName := map[string]afp.VolumeConfig{} + for _, v := range vols { + byName[v.Name] = v + } + if byName["Modern"].AppleDoubleMode != afp.AppleDoubleModeModern { + t.Fatalf("Modern AppleDoubleMode = %q", byName["Modern"].AppleDoubleMode) + } + if byName["Legacy"].AppleDoubleMode != afp.AppleDoubleModeLegacy { + t.Fatalf("Legacy AppleDoubleMode = %q", byName["Legacy"].AppleDoubleMode) + } +} + +func TestLoadAFPConfig_PerVolumeFSType(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP.Volumes.Local] +name = "Local" +path = 'C:\Mac\Local' +fs_type = "local_fs" + +[AFP.Volumes.Garden] +name = "Garden" +path = 'C:\Mac\Garden' +fs_type = "macgarden" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + + cfg := loadAFPForTest(t, cfgPath) + vols, err := cfg.ResolvedVolumes() + if err != nil { + t.Fatalf("ResolvedVolumes: %v", err) + } + if len(vols) != 2 { + t.Fatalf("want 2 vols, got %d", len(vols)) + } + byName := map[string]afp.VolumeConfig{} + for _, v := range vols { + byName[v.Name] = v + } + if byName["Local"].FSType != 
afp.FSTypeLocalFS { + t.Fatalf("Local fs_type = %q", byName["Local"].FSType) + } + if byName["Garden"].FSType != afp.FSTypeMacGarden { + t.Fatalf("Garden fs_type = %q", byName["Garden"].FSType) + } +} + +func TestLoadAFPConfig_InvalidFSType(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP.Volumes.Bad] +name = "Bad" +path = 'C:\Mac\Bad' +fs_type = "bananas" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + src, err := config.Load(cfgPath) + if err != nil { + t.Fatalf("config.Load: %v", err) + } + cfg := afp.DefaultConfig() + if err := loadAFPSection(src, &cfg); err == nil { + t.Fatal("expected invalid fs_type error") + } +} + +func TestLoadAFPConfig_MacGardenWithoutPath(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP.Volumes.MacGarden] +name = "Mac Garden" +fs_type = "macgarden" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + cfg := loadAFPForTest(t, cfgPath) + vols, err := cfg.ResolvedVolumes() + if err != nil { + t.Fatalf("ResolvedVolumes: %v", err) + } + if len(vols) != 1 { + t.Fatalf("want 1 vol, got %d", len(vols)) + } + if vols[0].FSType != afp.FSTypeMacGarden { + t.Fatalf("fs_type = %q", vols[0].FSType) + } + if got, want := filepath.ToSlash(vols[0].Path), ".macgarden/Mac_Garden"; got != want { + t.Fatalf("generated path = %q, want %q", got, want) + } +} + +func TestLoadAFPConfig_LocalFSWithoutPathStillFails(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[AFP.Volumes.Local] +name = "Local" +fs_type = "local_fs" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + src, err := config.Load(cfgPath) + if err != nil { + t.Fatalf("config.Load: %v", err) + } + cfg := afp.DefaultConfig() + if err := 
loadAFPSection(src, &cfg); err == nil { + t.Fatal("expected path required error for local_fs") + } +} diff --git a/cmd/omnitalk/config_flags.go b/cmd/omnitalk/config_flags.go new file mode 100644 index 0000000..329faa3 --- /dev/null +++ b/cmd/omnitalk/config_flags.go @@ -0,0 +1,96 @@ +package main + +import ( + "github.com/pgodw/omnitalk/port/ethertalk" + "github.com/pgodw/omnitalk/port/localtalk" +) + +// flagInputs collects raw values from the CLI flags. main.go derefs each +// pointer once and passes them here so flag-driven runs and config-file +// runs both produce a single appConfig that downstream wiring reads. +type flagInputs struct { + LogLevel string + LogTraffic bool + ParsePackets bool + ParseOutput string + + LToUDPEnabled bool + LToUDPInterface string + LToUDPSeedNetwork uint + LToUDPSeedZone string + + TashTalkPort string + TashTalkSeedNetwork uint + TashTalkSeedZone string + + EtherTalkDevice string + EtherTalkBackend string + EtherTalkHWAddress string + EtherTalkBridgeMode string + EtherTalkBridgeHostMAC string + EtherTalkSeedNetworkMin uint + EtherTalkSeedNetworkMax uint + EtherTalkSeedZone string + EtherTalkDesiredNetwork uint + EtherTalkDesiredNode uint + + MacIPEnabled bool + MacIPGWIP string + MacIPSubnet string + MacIPNameserver string + MacIPZone string + MacIPGatewayIP string + MacIPNAT bool + MacIPDHCPRelay bool + MacIPLeaseFile string +} + +// flagsToConfig builds an appConfig from CLI flag values. It is the +// flag-driven counterpart to loadConfigFromFile and is the only place +// that translates flag pointers into the unified config struct. 
+func flagsToConfig(in flagInputs) appConfig { + cfg := defaultAppConfig() + + cfg.LogLevel = in.LogLevel + cfg.LogTraffic = in.LogTraffic + cfg.ParsePackets = in.ParsePackets + cfg.ParseOutput = in.ParseOutput + + cfg.LToUDP = localtalk.LToUDPConfig{ + Enabled: in.LToUDPEnabled, + Interface: in.LToUDPInterface, + SeedNetwork: in.LToUDPSeedNetwork, + SeedZone: in.LToUDPSeedZone, + } + + cfg.TashTalk = localtalk.TashTalkConfig{ + Port: in.TashTalkPort, + SeedNetwork: in.TashTalkSeedNetwork, + SeedZone: in.TashTalkSeedZone, + } + + cfg.EtherTalk = ethertalk.Config{ + Device: in.EtherTalkDevice, + Backend: in.EtherTalkBackend, + HWAddress: in.EtherTalkHWAddress, + BridgeMode: in.EtherTalkBridgeMode, + BridgeHostMAC: in.EtherTalkBridgeHostMAC, + SeedNetworkMin: in.EtherTalkSeedNetworkMin, + SeedNetworkMax: in.EtherTalkSeedNetworkMax, + SeedZone: in.EtherTalkSeedZone, + DesiredNetwork: in.EtherTalkDesiredNetwork, + DesiredNode: in.EtherTalkDesiredNode, + } + + cfg.MacIPEnabled = in.MacIPEnabled + cfg.MacIPGWIP = in.MacIPGWIP + cfg.MacIPSubnet = in.MacIPSubnet + cfg.MacIPNameserver = in.MacIPNameserver + cfg.MacIPZone = in.MacIPZone + cfg.MacIPGatewayIP = in.MacIPGatewayIP + cfg.MacIPNAT = in.MacIPNAT + cfg.MacIPDHCPRelay = in.MacIPDHCPRelay + cfg.MacIPLeaseFile = in.MacIPLeaseFile + + return cfg +} diff --git a/cmd/omnitalk/config_ini.go b/cmd/omnitalk/config_ini.go index 989232a..c6a6ca4 100644 --- a/cmd/omnitalk/config_ini.go +++ b/cmd/omnitalk/config_ini.go @@ -2,37 +2,30 @@ package main import ( "fmt" - "path/filepath" - "strconv" "strings" - "github.com/pgodw/omnitalk/go/service/afp" - "gopkg.in/ini.v1" + "github.com/knadh/koanf/v2" + + "github.com/pgodw/omnitalk/config" + "github.com/pgodw/omnitalk/port/ethertalk" + "github.com/pgodw/omnitalk/port/localtalk" ) -type iniConfig struct { +// appConfig is the cmd-local view of resolved configuration. Each +// section is a typed Config struct owned by the package that consumes +// it. 
The same struct is populated either from a TOML file (via +// loadConfigFromFile) or from CLI flags (via flagsToConfig); downstream +// wiring reads only from this struct, never from flag pointers. AFP +// lives behind //go:build afp and is wired up separately via wireAFP. +type appConfig struct { LogLevel string LogTraffic bool ParsePackets bool ParseOutput string - LToUDPEnabled bool - LToUDPInterface string - LToUDPSeedNetwork uint - LToUDPSeedZone string - - TashTalkPort string - TashTalkSeedNetwork uint - TashTalkSeedZone string - - EtherTalkDevice string - EtherTalkBackend string - EtherTalkHWAddr string - EtherTalkBridgeMode string - EtherTalkBridgeHostMAC string - EtherTalkSeedNetworkMin uint - EtherTalkSeedNetworkMax uint - EtherTalkSeedZone string + LToUDP localtalk.LToUDPConfig + TashTalk localtalk.TashTalkConfig + EtherTalk ethertalk.Config MacIPEnabled bool MacIPNAT bool @@ -43,119 +36,56 @@ type iniConfig struct { MacIPDHCPRelay bool MacIPLeaseFile string MacIPZone string - - AFPEnabled bool - AFPServerName string - AFPZone string - AFPProtocols string - AFPTCPBinding string - AFPExtensionMapPath string - AFPDecomposedFilenames bool - AFPCNIDBackend string - AFPVolumes []afp.VolumeConfig } -func defaultINIConfig() iniConfig { - return iniConfig{ - LogLevel: "info", - LogTraffic: false, - ParsePackets: false, - ParseOutput: "", - - LToUDPEnabled: true, - LToUDPInterface: "0.0.0.0", - LToUDPSeedNetwork: 1, - LToUDPSeedZone: "LToUDP Network", - - TashTalkPort: "", - TashTalkSeedNetwork: 2, - TashTalkSeedZone: "TashTalk Network", - - EtherTalkDevice: "", - EtherTalkBackend: "pcap", - EtherTalkHWAddr: "DE:AD:BE:EF:CA:FE", - EtherTalkBridgeMode: "auto", - EtherTalkBridgeHostMAC: "", - EtherTalkSeedNetworkMin: 3, - EtherTalkSeedNetworkMax: 5, - EtherTalkSeedZone: "EtherTalk Network", +func defaultAppConfig() appConfig { + return appConfig{ + LogLevel: "info", - MacIPEnabled: false, - MacIPNAT: false, - MacIPSubnet: "192.168.100.0/24", - MacIPGWIP: "", - 
MacIPNameserver: "", - MacIPGatewayIP: "", - MacIPDHCPRelay: false, - MacIPLeaseFile: "", - MacIPZone: "", + LToUDP: localtalk.DefaultLToUDPConfig(), + TashTalk: localtalk.DefaultTashTalkConfig(), + EtherTalk: ethertalk.DefaultConfig(), - AFPEnabled: true, - AFPServerName: "Go File Server", - AFPZone: "", - AFPProtocols: "tcp,ddp", - AFPTCPBinding: ":548", - AFPExtensionMapPath: "", - AFPDecomposedFilenames: true, - AFPCNIDBackend: "sqlite", - AFPVolumes: nil, + MacIPSubnet: "192.168.100.0/24", } } -func loadConfigFromINI(path string) (iniConfig, error) { - cfg := defaultINIConfig() - - f, err := ini.Load(path) +// loadConfigFromFile loads and resolves the cmd-neutral sections of the +// TOML config. The raw config.Source is also returned so optional +// subsystems (currently AFP, behind //go:build afp) can lazily read +// their own sections without appConfig having to know about them. +func loadConfigFromFile(path string) (appConfig, config.Source, error) { + src, err := config.Load(path) if err != nil { - return cfg, err + return defaultAppConfig(), config.Source{}, err } - - lt := f.Section("LToUdp") - if cfg.LToUDPEnabled, err = parseBoolKey(lt, "enabled", cfg.LToUDPEnabled); err != nil { - return cfg, err - } - cfg.LToUDPInterface = parseStringKey(lt, "interface", cfg.LToUDPInterface) - if cfg.LToUDPSeedNetwork, err = parseUintKey(lt, "seed_network", cfg.LToUDPSeedNetwork); err != nil { - return cfg, err + cfg, err := resolveAppConfig(src) + if err != nil { + return defaultAppConfig(), src, err } - cfg.LToUDPSeedZone = parseStringKey(lt, "seed_zone", cfg.LToUDPSeedZone) + return cfg, src, nil +} - tt := f.Section("TashTalk") - cfg.TashTalkPort = parseStringKey(tt, "port", cfg.TashTalkPort) - if cfg.TashTalkSeedNetwork, err = parseUintKey(tt, "seed_network", cfg.TashTalkSeedNetwork); err != nil { - return cfg, err - } - cfg.TashTalkSeedZone = parseStringKey(tt, "seed_zone", cfg.TashTalkSeedZone) +func resolveAppConfig(src config.Source) (appConfig, error) { + 
cfg := defaultAppConfig() + k := src.K - et := f.Section("EtherTalk") - backend := strings.ToLower(parseStringKey(et, "backend", cfg.EtherTalkBackend)) - switch backend { - case "", "pcap", "tap", "tun": - // Empty backend disables EtherTalk and is handled by empty device string. - default: - return cfg, fmt.Errorf("[EtherTalk] backend must be blank, pcap, tap, or tun, got %q", backend) - } - cfg.EtherTalkBackend = backend - cfg.EtherTalkDevice = parseStringKey(et, "device", cfg.EtherTalkDevice) - if backend == "" { - cfg.EtherTalkDevice = "" - } - cfg.EtherTalkHWAddr = parseStringKey(et, "hw_address", cfg.EtherTalkHWAddr) - cfg.EtherTalkBridgeMode = parseStringKey(et, "bridge_mode", cfg.EtherTalkBridgeMode) - cfg.EtherTalkBridgeHostMAC = parseStringKey(et, "bridge_host_mac", cfg.EtherTalkBridgeHostMAC) - if cfg.EtherTalkSeedNetworkMin, err = parseUintKey(et, "seed_network_min", cfg.EtherTalkSeedNetworkMin); err != nil { + if err := loadSection(k, "LToUdp", &cfg.LToUDP); err != nil { return cfg, err } - if cfg.EtherTalkSeedNetworkMax, err = parseUintKey(et, "seed_network_max", cfg.EtherTalkSeedNetworkMax); err != nil { + if err := loadSection(k, "TashTalk", &cfg.TashTalk); err != nil { return cfg, err } - cfg.EtherTalkSeedZone = parseStringKey(et, "seed_zone", cfg.EtherTalkSeedZone) - - macipSection := f.Section("MacIP") - if cfg.MacIPEnabled, err = parseBoolKey(macipSection, "enabled", cfg.MacIPEnabled); err != nil { + if err := loadSection(k, "EtherTalk", &cfg.EtherTalk); err != nil { return cfg, err } - mode := strings.ToLower(parseStringKey(macipSection, "mode", "")) + cfg.EtherTalk.Backend = strings.ToLower(strings.TrimSpace(cfg.EtherTalk.Backend)) + if cfg.EtherTalk.Backend == "" { + cfg.EtherTalk.Device = "" + } + + cfg.MacIPEnabled = boolWithDefault(k, "MacIP.enabled", cfg.MacIPEnabled) + mode := strings.ToLower(stringWithDefault(k, "MacIP.mode", "")) switch mode { case "", "pcap": cfg.MacIPNAT = false @@ -164,186 +94,60 @@ func loadConfigFromINI(path 
string) (iniConfig, error) { default: return cfg, fmt.Errorf("[MacIP] mode must be pcap or nat, got %q", mode) } - cfg.MacIPNameserver = parseStringKey(macipSection, "nameserver", cfg.MacIPNameserver) - cfg.MacIPSubnet = parseStringKey(macipSection, "nat_subnet", cfg.MacIPSubnet) - // nat_gw maps to -macip-nat-gw. - cfg.MacIPGWIP = parseStringKey(macipSection, "nat_gw", cfg.MacIPGWIP) - cfg.MacIPLeaseFile = parseStringKey(macipSection, "lease_file", cfg.MacIPLeaseFile) - // ip_gateway maps to -macip-ip-gateway. - cfg.MacIPGatewayIP = parseStringKey(macipSection, "ip_gateway", cfg.MacIPGatewayIP) - if cfg.MacIPDHCPRelay, err = parseBoolKey(macipSection, "dhcp_relay", cfg.MacIPDHCPRelay); err != nil { - return cfg, err - } - cfg.MacIPZone = parseStringKey(macipSection, "zone", cfg.MacIPZone) - - afpSection := f.Section("AFP") - if cfg.AFPEnabled, err = parseBoolKey(afpSection, "enabled", cfg.AFPEnabled); err != nil { - return cfg, err - } - cfg.AFPServerName = parseStringKey(afpSection, "name", cfg.AFPServerName) - cfg.AFPZone = parseStringKey(afpSection, "zone", cfg.AFPZone) - cfg.AFPProtocols = parseStringKey(afpSection, "protocols", cfg.AFPProtocols) - cfg.AFPTCPBinding = parseStringKey(afpSection, "binding", cfg.AFPTCPBinding) - cfg.AFPExtensionMapPath = parseStringKey(afpSection, "extension_map", cfg.AFPExtensionMapPath) - if cfg.AFPExtensionMapPath != "" && !filepath.IsAbs(cfg.AFPExtensionMapPath) { - cfg.AFPExtensionMapPath = filepath.Join(filepath.Dir(path), cfg.AFPExtensionMapPath) - } - cfg.AFPVolumes = nil - var ( - seenDecomposed bool - seenCNIDBackend bool - ) - for _, sec := range f.Sections() { - if !strings.HasPrefix(strings.ToLower(sec.Name()), "volumes.") { - continue - } - - sectionName := sec.Name() - defaultVolumeName := strings.TrimPrefix(sectionName, "Volumes.") - if defaultVolumeName == sectionName { - defaultVolumeName = strings.TrimPrefix(sectionName, "volumes.") - } - name := parseStringKey(sec, "name", defaultVolumeName) - pathVal := 
parseStringKey(sec, "path", "") - if strings.TrimSpace(pathVal) == "" { - return cfg, fmt.Errorf("[%s] path is required", sectionName) - } - - vol := afp.VolumeConfig{Name: name, Path: pathVal} - if sec.HasKey("rebuild_desktop_db") { - v, parseErr := parseBoolKey(sec, "rebuild_desktop_db", false) - if parseErr != nil { - return cfg, parseErr - } - vol.RebuildDesktopDB = v - } - - if sec.HasKey("read_only") { - v, parseErr := parseBoolKey(sec, "read_only", false) - if parseErr != nil { - return cfg, parseErr - } - vol.ReadOnly = v - } - - if sec.HasKey("use_decomposed_names") { - v, parseErr := parseBoolKey(sec, "use_decomposed_names", cfg.AFPDecomposedFilenames) - if parseErr != nil { - return cfg, parseErr - } - if seenDecomposed && v != cfg.AFPDecomposedFilenames { - return cfg, fmt.Errorf("[%s] use_decomposed_names conflicts with another volume section", sectionName) - } - cfg.AFPDecomposedFilenames = v - seenDecomposed = true - } - - if sec.HasKey("cnid_backend") { - backendVal := parseStringKey(sec, "cnid_backend", cfg.AFPCNIDBackend) - if backendVal == "" { - backendVal = cfg.AFPCNIDBackend - } - if seenCNIDBackend && !strings.EqualFold(backendVal, cfg.AFPCNIDBackend) { - return cfg, fmt.Errorf("[%s] cnid_backend conflicts with another volume section", sectionName) - } - cfg.AFPCNIDBackend = backendVal - seenCNIDBackend = true - } - - if sec.HasKey("fork_backend") { - forkBackend := strings.ToLower(parseStringKey(sec, "fork_backend", "")) - if forkBackend != "" && forkBackend != "appledouble" { - return cfg, fmt.Errorf("[%s] fork_backend must be blank or AppleDouble", sectionName) - } - } + cfg.MacIPNameserver = stringWithDefault(k, "MacIP.nameserver", cfg.MacIPNameserver) + cfg.MacIPSubnet = stringWithDefault(k, "MacIP.nat_subnet", cfg.MacIPSubnet) + cfg.MacIPGWIP = stringWithDefault(k, "MacIP.nat_gw", cfg.MacIPGWIP) + cfg.MacIPLeaseFile = stringWithDefault(k, "MacIP.lease_file", cfg.MacIPLeaseFile) + cfg.MacIPGatewayIP = stringWithDefault(k, 
"MacIP.ip_gateway", cfg.MacIPGatewayIP) + cfg.MacIPDHCPRelay = boolWithDefault(k, "MacIP.dhcp_relay", cfg.MacIPDHCPRelay) + cfg.MacIPZone = stringWithDefault(k, "MacIP.zone", cfg.MacIPZone) - if sec.HasKey("appledouble_mode") { - modeVal := strings.ToLower(parseStringKey(sec, "appledouble_mode", "")) - parsedMode, parseErr := parseINIAppleDoubleMode(modeVal) - if parseErr != nil { - return cfg, fmt.Errorf("[%s] %w", sectionName, parseErr) - } - vol.AppleDoubleMode = parsedMode - } - - cfg.AFPVolumes = append(cfg.AFPVolumes, vol) - } - - if !cfg.AFPEnabled { - cfg.AFPVolumes = nil - } - - loggingSection := f.Section("Logging") - cfg.LogLevel = parseStringKey(loggingSection, "level", cfg.LogLevel) - if cfg.ParsePackets, err = parseBoolKey(loggingSection, "parse_packets", cfg.ParsePackets); err != nil { - return cfg, err - } - if cfg.LogTraffic, err = parseBoolKey(loggingSection, "log_traffic", cfg.LogTraffic); err != nil { - return cfg, err - } - cfg.ParseOutput = parseStringKey(loggingSection, "parse_output", cfg.ParseOutput) + cfg.LogLevel = stringWithDefault(k, "Logging.level", cfg.LogLevel) + cfg.ParsePackets = boolWithDefault(k, "Logging.parse_packets", cfg.ParsePackets) + cfg.LogTraffic = boolWithDefault(k, "Logging.log_traffic", cfg.LogTraffic) + cfg.ParseOutput = stringWithDefault(k, "Logging.parse_output", cfg.ParseOutput) return cfg, nil } -func parseStringKey(sec *ini.Section, key, defaultVal string) string { - if !sec.HasKey(key) { - return defaultVal - } - v := stripOptionalQuotes(sec.Key(key).String()) - if strings.TrimSpace(v) == "" { - return defaultVal - } - return v +// validatable is the shape that every package's Config struct exposes: +// koanf-tagged fields, defaults via the package's DefaultConfig(), and a +// Validate method that enforces logical (not syntactic) rules. 
+type validatable interface { + Validate() error } -func parseBoolKey(sec *ini.Section, key string, defaultVal bool) (bool, error) { - if !sec.HasKey(key) { - return defaultVal, nil +// loadSection unmarshals a single subtree of the koanf instance onto an +// already-defaulted target, then runs the target's Validate. The target +// must be a pointer to a struct with koanf tags; it must also satisfy +// the validatable interface. +func loadSection(k *koanf.Koanf, key string, target validatable) error { + if !k.Exists(key) { + return target.Validate() } - v := strings.TrimSpace(stripOptionalQuotes(sec.Key(key).String())) - if v == "" { - return defaultVal, nil + if err := k.UnmarshalWithConf(key, target, koanf.UnmarshalConf{Tag: "koanf"}); err != nil { + return fmt.Errorf("[%s] %w", key, err) } - parsed, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("[%s] invalid bool for %q: %q", sec.Name(), key, v) + if err := target.Validate(); err != nil { + return fmt.Errorf("[%s] %w", key, err) } - return parsed, nil + return nil } -func parseUintKey(sec *ini.Section, key string, defaultVal uint) (uint, error) { - if !sec.HasKey(key) { - return defaultVal, nil +func stringWithDefault(k *koanf.Koanf, path, def string) string { + if !k.Exists(path) { + return def } - v := strings.TrimSpace(stripOptionalQuotes(sec.Key(key).String())) + v := strings.TrimSpace(k.String(path)) if v == "" { - return defaultVal, nil + return def } - parsed, err := strconv.ParseUint(v, 10, 32) - if err != nil { - return 0, fmt.Errorf("[%s] invalid uint for %q: %q", sec.Name(), key, v) - } - return uint(parsed), nil -} - -func stripOptionalQuotes(s string) string { - s = strings.TrimSpace(s) - if len(s) >= 2 { - if (s[0] == '\'' && s[len(s)-1] == '\'') || (s[0] == '"' && s[len(s)-1] == '"') { - return strings.TrimSpace(s[1 : len(s)-1]) - } - } - return s + return v } -func parseINIAppleDoubleMode(value string) (afp.AppleDoubleMode, error) { - switch 
strings.ToLower(strings.TrimSpace(value)) { - case "", "modern", string(afp.AppleDoubleModeModern): - return afp.AppleDoubleModeModern, nil - case "legacy", string(afp.AppleDoubleModeLegacy): - return afp.AppleDoubleModeLegacy, nil - default: - return "", fmt.Errorf("appledouble_mode must be modern or legacy, got %q", value) +func boolWithDefault(k *koanf.Koanf, path string, def bool) bool { + if !k.Exists(path) { + return def } + return k.Bool(path) } diff --git a/cmd/omnitalk/config_ini_test.go b/cmd/omnitalk/config_ini_test.go deleted file mode 100644 index 094c1f3..0000000 --- a/cmd/omnitalk/config_ini_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package main - -import ( - "net" - "os" - "path/filepath" - "testing" - - "github.com/pgodw/omnitalk/go/service/afp" -) - -func TestLoadConfigFromINI_ParsesSections(t *testing.T) { - dir := t.TempDir() - cfgPath := filepath.Join(dir, "server.ini") - content := `[LToUdp] -enabled = true - interface = 192.168.0.103 -seed_network = 11 -seed_zone = "LToUDP Network" - -[TashTalk] -port = COM1 -seed_network = 12 -seed_zone = "TashTalk Network" - -[EtherTalk] -backend = pcap -device = "eth0" -hw_address = "DE:AD:BE:EF:CA:FE" -bridge_mode = wifi -bridge_host_mac = "AA:BB:CC:DD:EE:FF" -seed_network_min = 3 -seed_network_max = 9 -seed_zone = "EtherTalk Network" - -[MacIP] -enabled = true -mode = nat -nameserver = 1.1.1.1 -nat_subnet = 10.1.0.0/24 -nat_gw = 10.1.0.1 -ip_gateway = 192.168.0.1 -dhcp_relay = true -lease_file = leases.txt -zone = "MacIP Zone" - -[AFP] -enabled = true -name = "OmniTalk" -zone = "EtherTalk Network" -protocols = ddp,tcp -binding = ":548" -extension_map = "extmap.conf" - -[Volumes.Main] -name = "Main" -path = "C:\Mac" -cnid_backend = memory -use_decomposed_names = true -fork_backend = AppleDouble -appledouble_mode = legacy - -[Logging] -level = debug -parse_packets = true -log_traffic = true -` - if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { - t.Fatalf("write config: %v", err) - } - - 
cfg, err := loadConfigFromINI(cfgPath) - if err != nil { - t.Fatalf("loadConfigFromINI error: %v", err) - } - - if cfg.LogLevel != "debug" || !cfg.LogTraffic || !cfg.ParsePackets { - t.Fatalf("unexpected logging config: %#v", cfg) - } - if cfg.LToUDPInterface != "192.168.0.103" || cfg.LToUDPSeedNetwork != 11 || cfg.TashTalkPort != "COM1" { - t.Fatalf("unexpected LocalTalk/TashTalk config: %#v", cfg) - } - if cfg.EtherTalkDevice != "eth0" || cfg.EtherTalkSeedNetworkMax != 9 { - t.Fatalf("unexpected EtherTalk config: %#v", cfg) - } - if cfg.EtherTalkBackend != "pcap" { - t.Fatalf("unexpected EtherTalk backend: %q", cfg.EtherTalkBackend) - } - if cfg.EtherTalkBridgeMode != "wifi" || cfg.EtherTalkBridgeHostMAC != "AA:BB:CC:DD:EE:FF" { - t.Fatalf("unexpected EtherTalk bridge config: %#v", cfg) - } - if !cfg.MacIPEnabled || !cfg.MacIPNAT || cfg.MacIPGWIP != "10.1.0.1" || cfg.MacIPGatewayIP != "192.168.0.1" || cfg.MacIPNameserver != "1.1.1.1" { - t.Fatalf("unexpected MacIP config: %#v", cfg) - } - if cfg.AFPExtensionMapPath != filepath.Join(dir, "extmap.conf") { - t.Fatalf("AFPExtensionMapPath = %q, want %q", cfg.AFPExtensionMapPath, filepath.Join(dir, "extmap.conf")) - } - if len(cfg.AFPVolumes) != 1 || cfg.AFPVolumes[0].Path != "C:\\Mac" { - t.Fatalf("unexpected AFP volumes: %#v", cfg.AFPVolumes) - } - if cfg.AFPVolumes[0].AppleDoubleMode != afp.AppleDoubleModeLegacy { - t.Fatalf("expected volume to have legacy AppleDouble mode, got %q", cfg.AFPVolumes[0].AppleDoubleMode) - } -} - -func TestLoadConfigFromINI_ConflictingVolumeOptions(t *testing.T) { - dir := t.TempDir() - cfgPath := filepath.Join(dir, "server.ini") - content := `[Volumes.One] -name = "One" -path = "/tmp/one" -use_decomposed_names = true - -[Volumes.Two] -name = "Two" -path = "/tmp/two" -use_decomposed_names = false -` - if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { - t.Fatalf("write config: %v", err) - } - - if _, err := loadConfigFromINI(cfgPath); err == nil { - t.Fatal("expected 
conflict error, got nil") - } -} - -func TestLoadConfigFromINI_BlankNatGatewayKeepsDefault(t *testing.T) { - dir := t.TempDir() - cfgPath := filepath.Join(dir, "server.ini") - content := `[MacIP] -enabled = true -mode = nat - nat_subnet = -nat_gw = -ip_gateway = 192.168.0.1 -` - if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { - t.Fatalf("write config: %v", err) - } - - cfg, err := loadConfigFromINI(cfgPath) - if err != nil { - t.Fatalf("loadConfigFromINI error: %v", err) - } - - if cfg.MacIPGWIP != "" { - t.Fatalf("MacIPGWIP = %q, want blank default", cfg.MacIPGWIP) - } - if cfg.MacIPSubnet != "192.168.100.0/24" { - t.Fatalf("MacIPSubnet = %q, want default %q", cfg.MacIPSubnet, "192.168.100.0/24") - } - if cfg.MacIPGatewayIP != "192.168.0.1" { - t.Fatalf("MacIPGatewayIP = %q, want %q", cfg.MacIPGatewayIP, "192.168.0.1") - } -} - -func TestResolveMacIPGatewayIP_PcapModeUsesUpstreamGateway(t *testing.T) { - _, subnet, err := net.ParseCIDR("10.1.0.0/24") - if err != nil { - t.Fatalf("ParseCIDR: %v", err) - } - got := resolveMacIPGatewayIP("192.168.100.1", subnet, net.ParseIP("192.168.100.1"), false) - if got == nil || got.String() != "192.168.100.1" { - t.Fatalf("resolveMacIPGatewayIP pcap = %v, want 192.168.100.1", got) - } -} - -func TestResolveMacIPGatewayIP_NATModeUsesConfiguredOrSubnetDefault(t *testing.T) { - _, subnet, err := net.ParseCIDR("10.1.0.0/24") - if err != nil { - t.Fatalf("ParseCIDR: %v", err) - } - configured := resolveMacIPGatewayIP("10.1.0.1", subnet, net.ParseIP("192.168.1.1"), true) - if configured == nil || configured.String() != "10.1.0.1" { - t.Fatalf("resolveMacIPGatewayIP configured = %v, want 10.1.0.1", configured) - } - - fallback := resolveMacIPGatewayIP("", subnet, net.ParseIP("192.168.1.1"), true) - if fallback == nil || fallback.String() != "10.1.0.1" { - t.Fatalf("resolveMacIPGatewayIP fallback = %v, want 10.1.0.1", fallback) - } -} - -// TestLoadConfigFromINI_PerVolumeAppleDoubleMode verifies that two volumes 
in the -// same config file can independently specify different AppleDouble modes, and that -// each volume carries its own setting rather than a shared global one. -func TestLoadConfigFromINI_PerVolumeAppleDoubleMode(t *testing.T) { - dir := t.TempDir() - cfgPath := filepath.Join(dir, "server.ini") - content := `[Volumes.Modern] -name = "Modern" -path = "/tmp/modern" -appledouble_mode = modern - -[Volumes.Legacy] -name = "Legacy" -path = "/tmp/legacy" -appledouble_mode = legacy -` - if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { - t.Fatalf("write config: %v", err) - } - - cfg, err := loadConfigFromINI(cfgPath) - if err != nil { - t.Fatalf("loadConfigFromINI error: %v", err) - } - - if len(cfg.AFPVolumes) != 2 { - t.Fatalf("expected 2 volumes, got %d", len(cfg.AFPVolumes)) - } - - // Find volumes by name regardless of parse order. - volsByName := make(map[string]afp.VolumeConfig) - for _, v := range cfg.AFPVolumes { - volsByName[v.Name] = v - } - - modernVol, ok := volsByName["Modern"] - if !ok { - t.Fatal("volume \"Modern\" not found") - } - if modernVol.AppleDoubleMode != afp.AppleDoubleModeModern { - t.Fatalf("Modern volume AppleDoubleMode = %q, want %q", modernVol.AppleDoubleMode, afp.AppleDoubleModeModern) - } - - legacyVol, ok := volsByName["Legacy"] - if !ok { - t.Fatal("volume \"Legacy\" not found") - } - if legacyVol.AppleDoubleMode != afp.AppleDoubleModeLegacy { - t.Fatalf("Legacy volume AppleDoubleMode = %q, want %q", legacyVol.AppleDoubleMode, afp.AppleDoubleModeLegacy) - } -} diff --git a/cmd/omnitalk/config_test.go b/cmd/omnitalk/config_test.go new file mode 100644 index 0000000..bb19c78 --- /dev/null +++ b/cmd/omnitalk/config_test.go @@ -0,0 +1,106 @@ +package main + +import ( + "os" + "path/filepath" + "testing" +) + +func TestLoadConfig_BlankNatGatewayKeepsDefault(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[MacIP] +enabled = true +mode = "nat" +nat_subnet = "" +nat_gw = 
"" +ip_gateway = "192.168.0.1" +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + + cfg, _, err := loadConfigFromFile(cfgPath) + if err != nil { + t.Fatalf("loadConfigFromFile error: %v", err) + } + + if cfg.MacIPGWIP != "" { + t.Fatalf("MacIPGWIP = %q, want blank default", cfg.MacIPGWIP) + } + if cfg.MacIPSubnet != "192.168.100.0/24" { + t.Fatalf("MacIPSubnet = %q, want default %q", cfg.MacIPSubnet, "192.168.100.0/24") + } + if cfg.MacIPGatewayIP != "192.168.0.1" { + t.Fatalf("MacIPGatewayIP = %q, want %q", cfg.MacIPGatewayIP, "192.168.0.1") + } +} + +func TestLoadConfig_LoggingAndPortsSections(t *testing.T) { + dir := t.TempDir() + cfgPath := filepath.Join(dir, "server.toml") + content := `[LToUdp] +enabled = true +interface = "192.168.0.103" +seed_network = 11 +seed_zone = "LToUDP Network" + +[TashTalk] +port = "COM1" +seed_network = 12 +seed_zone = "TashTalk Network" + +[EtherTalk] +backend = "pcap" +device = "eth0" +hw_address = "DE:AD:BE:EF:CA:FE" +bridge_mode = "wifi" +bridge_host_mac = "AA:BB:CC:DD:EE:FF" +seed_network_min = 3 +seed_network_max = 9 +seed_zone = "EtherTalk Network" + +[MacIP] +enabled = true +mode = "nat" +nameserver = "1.1.1.1" +nat_subnet = "10.1.0.0/24" +nat_gw = "10.1.0.1" +ip_gateway = "192.168.0.1" +dhcp_relay = true +lease_file = "leases.txt" +zone = "MacIP Zone" + +[Logging] +level = "debug" +parse_packets = true +log_traffic = true +` + if err := os.WriteFile(cfgPath, []byte(content), 0o600); err != nil { + t.Fatalf("write config: %v", err) + } + + cfg, _, err := loadConfigFromFile(cfgPath) + if err != nil { + t.Fatalf("loadConfigFromFile error: %v", err) + } + + if cfg.LogLevel != "debug" || !cfg.LogTraffic || !cfg.ParsePackets { + t.Fatalf("unexpected logging config: %#v", cfg) + } + if cfg.LToUDP.Interface != "192.168.0.103" || cfg.LToUDP.SeedNetwork != 11 || cfg.TashTalk.Port != "COM1" { + t.Fatalf("unexpected LocalTalk/TashTalk config: %#v", cfg) + } + if 
cfg.EtherTalk.Device != "eth0" || cfg.EtherTalk.SeedNetworkMax != 9 { + t.Fatalf("unexpected EtherTalk config: %#v", cfg) + } + if cfg.EtherTalk.Backend != "pcap" { + t.Fatalf("unexpected EtherTalk backend: %q", cfg.EtherTalk.Backend) + } + if cfg.EtherTalk.BridgeMode != "wifi" || cfg.EtherTalk.BridgeHostMAC != "AA:BB:CC:DD:EE:FF" { + t.Fatalf("unexpected EtherTalk bridge config: %#v", cfg) + } + if !cfg.MacIPEnabled || !cfg.MacIPNAT || cfg.MacIPGWIP != "10.1.0.1" || cfg.MacIPGatewayIP != "192.168.0.1" || cfg.MacIPNameserver != "1.1.1.1" { + t.Fatalf("unexpected MacIP config: %#v", cfg) + } +} diff --git a/cmd/omnitalk/doc.go b/cmd/omnitalk/doc.go new file mode 100644 index 0000000..65847c1 --- /dev/null +++ b/cmd/omnitalk/doc.go @@ -0,0 +1,15 @@ +/* +Command omnitalk is the AppleTalk Phase 2 router and AFP file server. + +It wires ports (EtherTalk, LToUDP, TashTalk, virtual LocalTalk) to a +router, registers the requested services (RTMP, ZIP, NBP, AEP, AFP over +ASP/DSI, MacIP), and runs until interrupted. Configuration comes from +flags and an optional TOML file; build tags (afp, macgarden, macip, +sqlite_cnid) gate the optional subsystems so a router-only binary +shrinks accordingly. + +This package is the wiring layer only — protocol logic lives under +protocol/, link-layer transports under port/, and stateful services +under service/. 
+*/ +package main diff --git a/cmd/omnitalk/extension_map.go b/cmd/omnitalk/extension_map.go index 69bca2a..bded9ae 100644 --- a/cmd/omnitalk/extension_map.go +++ b/cmd/omnitalk/extension_map.go @@ -1,3 +1,5 @@ +//go:build afp || all + package main import ( @@ -6,7 +8,7 @@ import ( "regexp" "strings" - "github.com/pgodw/omnitalk/go/service/afp" + "github.com/pgodw/omnitalk/service/afp" ) var extMapLinePattern = regexp.MustCompile(`^(\S+)\s+"([^"]*)"\s+"([^"]*)"`) diff --git a/cmd/omnitalk/extension_map_test.go b/cmd/omnitalk/extension_map_test.go index c6e1145..a98c7a8 100644 --- a/cmd/omnitalk/extension_map_test.go +++ b/cmd/omnitalk/extension_map_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package main import "testing" diff --git a/cmd/omnitalk/macgarden_register.go b/cmd/omnitalk/macgarden_register.go new file mode 100644 index 0000000..061765b --- /dev/null +++ b/cmd/omnitalk/macgarden_register.go @@ -0,0 +1,5 @@ +//go:build (afp && macgarden) || all + +package main + +import _ "github.com/pgodw/omnitalk/service/afpfs/macgarden" diff --git a/cmd/omnitalk/macip_disabled.go b/cmd/omnitalk/macip_disabled.go new file mode 100644 index 0000000..29a11d6 --- /dev/null +++ b/cmd/omnitalk/macip_disabled.go @@ -0,0 +1,15 @@ +//go:build !macip && !all + +package main + +import "github.com/pgodw/omnitalk/netlog" + +// wireMacIP is the no-op stub used when the binary is built without the +// macip tag. It logs a warning if the operator asked for MacIP and exits +// returning a nil hook so the rest of main.go skips MacIP wiring. 
+func wireMacIP(cfg MacIPConfig) (MacIPHook, error) { + if cfg.Enabled { + netlog.Warn("[MAIN][MacIP] -macip-enabled set but binary was built without -tags macip; ignoring") + } + return nil, nil +} diff --git a/cmd/omnitalk/macip_enabled.go b/cmd/omnitalk/macip_enabled.go new file mode 100644 index 0000000..c253295 --- /dev/null +++ b/cmd/omnitalk/macip_enabled.go @@ -0,0 +1,155 @@ +//go:build macip || all + +package main + +import ( + "fmt" + "net" + "strings" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/hwaddr" + "github.com/pgodw/omnitalk/port/rawlink" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/macip" +) + +type macipHook struct { + svc *macip.Service +} + +func (h *macipHook) Service() service.Service { return h.svc } +func (h *macipHook) PinLeaseToSession(net uint16, node, sess uint8) { h.svc.PinLeaseToSession(net, node, sess) } +func (h *macipHook) UnpinLeaseFromSession(sess uint8) { h.svc.UnpinLeaseFromSession(sess) } +func (h *macipHook) MarkSessionActivity(sess uint8) { h.svc.MarkSessionActivity(sess) } + +func wireMacIP(cfg MacIPConfig) (MacIPHook, error) { + if !cfg.Enabled { + return nil, nil + } + if cfg.EtherTalkBackend != "" && cfg.EtherTalkBackend != "pcap" { + return nil, fmt.Errorf("-macip-enabled currently requires -ethertalk-backend pcap (got %q)", cfg.EtherTalkBackend) + } + + ipIface := cfg.PcapDevice + if ipIface == "" { + if detected, ok := rawlink.DetectDefaultPcapInterface(); ok { + ipIface = detected + netlog.Info("[MAIN][MacIP] auto-detected pcap interface: %s", detected) + } else { + return nil, fmt.Errorf("-ethertalk-device is required when -macip-enabled is set (auto-detection failed)") + } + } + + ipMACStr := "" + if strings.TrimSpace(cfg.BridgeHostMAC) != "" { + ipMACStr = cfg.BridgeHostMAC + netlog.Info("[MAIN][MacIP] using bridge host MAC for IP-side: %s", ipMACStr) + } else if hostMAC, ok := rawlink.DetectHostMACForPcapInterface(ipIface); ok { + ipMACStr = hostMAC + 
netlog.Info("[MAIN][MacIP] auto-detected IP-side MAC from %s: %s", ipIface, ipMACStr) + } else { + ipMACStr = cfg.PcapHWAddr + } + + hostIPStr, hostIPDetected := detectPcapInterfaceIPv4(ipIface) + + if cfg.IPGateway == "" { + if gw, ok := rawlink.DetectDefaultGatewayForPcapInterface(ipIface); ok { + cfg.IPGateway = gw + netlog.Info("[MAIN][MacIP] auto-detected default gateway %s for interface %s", gw, ipIface) + } else if hostIPDetected { + cfg.IPGateway = hostIPStr + netlog.Warn("[MAIN][MacIP] default gateway auto-detection failed; falling back to interface IPv4 %s on %s", hostIPStr, ipIface) + } else { + return nil, fmt.Errorf("-macip-ip-gateway is required when -macip-enabled is set (auto-detection failed and no IPv4 address was found)") + } + } + + _, ipNet, err := net.ParseCIDR(cfg.NATSubnet) + if err != nil { + return nil, fmt.Errorf("invalid -macip-nat-subnet: %w", err) + } + ipMACAddr, err := hwaddr.ParseEthernet(ipMACStr) + if err != nil { + return nil, fmt.Errorf("invalid IP-side MAC: %w", err) + } + ipMAC := ipMACAddr.HardwareAddr() + ipGW := net.ParseIP(cfg.IPGateway).To4() + if ipGW == nil { + return nil, fmt.Errorf("invalid -macip-ip-gateway: %q", cfg.IPGateway) + } + var hostIP net.IP + if hostIPDetected { + hostIP = net.ParseIP(hostIPStr).To4() + } + gwIP := resolveMacIPGatewayIP(cfg.NATGatewayIP, ipNet, ipGW, cfg.NAT) + if gwIP == nil { + return nil, fmt.Errorf("invalid -macip-nat-gw: %q", cfg.NATGatewayIP) + } + if !cfg.NAT && strings.TrimSpace(cfg.NATGatewayIP) != "" { + netlog.Info("[MAIN][MacIP] ignoring -macip-nat-gw in non-NAT mode; using upstream gateway %s", gwIP) + } else if !cfg.NAT { + netlog.Info("[MAIN][MacIP] using upstream gateway %s in non-NAT mode", gwIP) + } + if cfg.NAT && gwIP.Equal(ipGW) { + return nil, fmt.Errorf("invalid MacIP configuration: -macip-nat-gw (%s) conflicts with the host-side upstream gateway (%s); choose a different MacIP gateway IP", gwIP, ipGW) + } + nsIP := ipGW + if cfg.Nameserver != "" { + nsIP = 
net.ParseIP(cfg.Nameserver).To4() + if nsIP == nil { + return nil, fmt.Errorf("invalid -macip-nameserver: %q", cfg.Nameserver) + } + } + + broadcast := broadcastAddr(ipNet) + var chosenZone []byte + if cfg.Zone != "" { + chosenZone = []byte(cfg.Zone) + } else if cfg.EtherTalkZone != "" { + chosenZone = []byte(cfg.EtherTalkZone) + } + + ipLink, err := rawlink.OpenPcap(rawlink.DefaultMacIPConfig(ipIface)) + if err != nil { + return nil, fmt.Errorf("failed opening MacIP rawlink on %s: %w", ipIface, err) + } + if fl, ok := ipLink.(rawlink.FilterableLink); ok { + if err := fl.SetFilter(macipBPFFilter(ipNet, cfg.DHCPRelay)); err != nil { + netlog.Warn("[MAIN][MacIP] could not set BPF filter on %s: %v", ipIface, err) + } + } + + svc := macip.New( + gwIP, ipNet.IP, ipNet.Mask, + nsIP, broadcast, + chosenZone, + cfg.NBP, + ipLink, ipMAC, hostIP, ipGW, + cfg.NAT, + cfg.DHCPRelay, + cfg.StateFile, + ) + netlog.Info("[MAIN][MacIP] gw=%s subnet=%s iface=%s host-ip=%s ip-gw=%s zone=%q nat=%t dhcp_relay=%t", + gwIP, cfg.NATSubnet, ipIface, hostIP, ipGW, string(chosenZone), cfg.NAT, cfg.DHCPRelay) + return &macipHook{svc: svc}, nil +} + +func resolveMacIPGatewayIP(configured string, natSubnet *net.IPNet, upstreamGateway net.IP, natMode bool) net.IP { + if !natMode { + return append(net.IP(nil), upstreamGateway.To4()...) 
+ } + trimmed := strings.TrimSpace(configured) + if trimmed != "" { + return net.ParseIP(trimmed).To4() + } + return firstUsableIPv4(natSubnet) +} + +func macipBPFFilter(ipNet *net.IPNet, dhcpMode bool) string { + if dhcpMode { + return "(arp) or (ip) or (udp dst port 68)" + } + return fmt.Sprintf("(arp) or (dst net %s)", ipNet.String()) +} diff --git a/cmd/omnitalk/macip_hook.go b/cmd/omnitalk/macip_hook.go new file mode 100644 index 0000000..f1c2144 --- /dev/null +++ b/cmd/omnitalk/macip_hook.go @@ -0,0 +1,47 @@ +package main + +import ( + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/zip" +) + +// MacIPHook is the cmd-layer abstraction over the optional MacIP gateway. +// The real implementation lives behind //go:build macip; the stub returns +// nil so router-only builds compile without the macip dependency surface. +type MacIPHook interface { + Service() service.Service + PinLeaseToSession(net uint16, node, sessID uint8) + UnpinLeaseFromSession(sessID uint8) + MarkSessionActivity(sessID uint8) +} + +// macIPAFPHooks adapts a MacIPHook to the AFPSessionHooks interface +// expected by AFP's ASP transport, so the two optional subsystems can +// be wired together without either side importing the other. +type macIPAFPHooks struct{ h MacIPHook } + +func (a macIPAFPHooks) OnOpen(net uint16, node, sessID uint8) { + a.h.PinLeaseToSession(net, node, sessID) +} +func (a macIPAFPHooks) OnClose(sessID uint8) { a.h.UnpinLeaseFromSession(sessID) } +func (a macIPAFPHooks) OnActivity(sessID uint8) { a.h.MarkSessionActivity(sessID) } + +// MacIPConfig collects every flag value wireMacIP needs, decoupling the +// caller (main.go, tag-neutral) from the macip package directly. 
+type MacIPConfig struct { + Enabled bool + NATGatewayIP string + NATSubnet string + Nameserver string + Zone string + IPGateway string + NAT bool + DHCPRelay bool + StateFile string + PcapDevice string + BridgeHostMAC string + PcapHWAddr string + EtherTalkZone string + EtherTalkBackend string + NBP *zip.NameInformationService +} diff --git a/cmd/omnitalk/macip_test.go b/cmd/omnitalk/macip_test.go new file mode 100644 index 0000000..5c827e2 --- /dev/null +++ b/cmd/omnitalk/macip_test.go @@ -0,0 +1,35 @@ +//go:build macip || all + +package main + +import ( + "net" + "testing" +) + +func TestResolveMacIPGatewayIP_PcapModeUsesUpstreamGateway(t *testing.T) { + _, subnet, err := net.ParseCIDR("10.1.0.0/24") + if err != nil { + t.Fatalf("ParseCIDR: %v", err) + } + got := resolveMacIPGatewayIP("192.168.100.1", subnet, net.ParseIP("192.168.100.1"), false) + if got == nil || got.String() != "192.168.100.1" { + t.Fatalf("resolveMacIPGatewayIP pcap = %v, want 192.168.100.1", got) + } +} + +func TestResolveMacIPGatewayIP_NATModeUsesConfiguredOrSubnetDefault(t *testing.T) { + _, subnet, err := net.ParseCIDR("10.1.0.0/24") + if err != nil { + t.Fatalf("ParseCIDR: %v", err) + } + configured := resolveMacIPGatewayIP("10.1.0.1", subnet, net.ParseIP("192.168.1.1"), true) + if configured == nil || configured.String() != "10.1.0.1" { + t.Fatalf("resolveMacIPGatewayIP configured = %v, want 10.1.0.1", configured) + } + + fallback := resolveMacIPGatewayIP("", subnet, net.ParseIP("192.168.1.1"), true) + if fallback == nil || fallback.String() != "10.1.0.1" { + t.Fatalf("resolveMacIPGatewayIP fallback = %v, want 10.1.0.1", fallback) + } +} diff --git a/cmd/omnitalk/main.go b/cmd/omnitalk/main.go index 379a091..cdc80f6 100644 --- a/cmd/omnitalk/main.go +++ b/cmd/omnitalk/main.go @@ -1,7 +1,7 @@ package main import ( - "encoding/hex" + "context" "flag" "fmt" "log" @@ -12,27 +12,26 @@ import ( "strings" "syscall" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - 
"github.com/pgodw/omnitalk/go/port/ethertalk" - "github.com/pgodw/omnitalk/go/port/localtalk" - "github.com/pgodw/omnitalk/go/port/rawlink" - "github.com/pgodw/omnitalk/go/router" - "github.com/pgodw/omnitalk/go/service" - "github.com/pgodw/omnitalk/go/service/aep" - "github.com/pgodw/omnitalk/go/service/afp" - "github.com/pgodw/omnitalk/go/service/asp" - "github.com/pgodw/omnitalk/go/service/dsi" - "github.com/pgodw/omnitalk/go/service/llap" - "github.com/pgodw/omnitalk/go/service/macip" - "github.com/pgodw/omnitalk/go/service/rtmp" - "github.com/pgodw/omnitalk/go/service/zip" + "github.com/pgodw/omnitalk/config" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/hwaddr" + "github.com/pgodw/omnitalk/pkg/logging" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/port/ethertalk" + "github.com/pgodw/omnitalk/port/localtalk" + "github.com/pgodw/omnitalk/port/rawlink" + "github.com/pgodw/omnitalk/router" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/aep" + "github.com/pgodw/omnitalk/service/llap" + "github.com/pgodw/omnitalk/service/rtmp" + "github.com/pgodw/omnitalk/service/zip" ) func main() { log.SetFlags(log.LstdFlags | log.Lmicroseconds) - configPath := flag.String("config", "", "Path to INI config file (cannot be combined with other flags)") + configPath := flag.String("config", "", "Path to TOML config file (cannot be combined with other flags)") showVersion := flag.Bool("version", false, "Print OmniTalk version information and exit") logLevel := flag.String("log-level", "info", "Minimum log level: debug, info, warn") @@ -75,7 +74,8 @@ func main() { parsePackets := flag.Bool("parse-packets", false, "Decode and log every inbound DDP packet (ATP/ASP/AFP layers)") parseOutput := flag.String("parse-output", "", "File path to write parsed packet log (appended; empty = stdout only)") - // AFP file sharing flags. + // AFP file sharing flags. 
Schemas live in service/afp; cmd-side + // wiring is split between afp_enabled.go and afp_disabled.go. afpServerName := flag.String("afp-name", "Go File Server", "AFP server name advertised to clients") afpZone := flag.String("afp-zone", "", "AppleTalk zone for AFP NBP registration (default: first zone found)") afpProtocols := flag.String("afp-protocols", "tcp,ddp", "AFP protocols to enable: tcp, ddp, or tcp,ddp") @@ -83,7 +83,7 @@ func main() { afpExtensionMap := flag.String("afp-extension-map", "", "Netatalk-compatible extension map file for Macintosh type/creator fallback") afpDecomposedFilenames := flag.Bool("afp-use-decomposed-names", true, "Encode host-reserved filename characters using 0xNN tokens when mapping AFP paths") afpCNIDBackend := flag.String("afp-cnid-backend", "sqlite", "CNID backend to use for AFP object IDs (sqlite or memory)") - afpAppleDoubleMode := flag.String("afp-appledouble-mode", string(afp.AppleDoubleModeModern), "AppleDouble metadata mode: modern or legacy") + afpAppleDoubleMode := flag.String("afp-appledouble-mode", "modern", "AppleDouble metadata mode: modern or legacy") var afpVolumes volumeFlags flag.Var(&afpVolumes, "afp-volume", `AFP volume to share, format: "Name:Path" (repeatable, e.g. 
-afp-volume "Mac Share:c:\mac")`) @@ -110,81 +110,89 @@ func main() { selectedConfig := *configPath if selectedConfig == "" && flag.NFlag() == 0 { - if _, err := os.Stat("server.ini"); err == nil { - selectedConfig = "server.ini" + if _, err := os.Stat("server.toml"); err == nil { + selectedConfig = "server.toml" } else if os.IsNotExist(err) { flag.Usage() return } else { - log.Fatalf("failed checking default config file server.ini: %v", err) + log.Fatalf("failed checking default config file server.toml: %v", err) } } - if selectedConfig != "" { - cfg, err := loadConfigFromINI(selectedConfig) + var ( + cfg appConfig + configSource config.Source + ) + fromConfigFile := selectedConfig != "" + if fromConfigFile { + loaded, src, err := loadConfigFromFile(selectedConfig) if err != nil { log.Fatalf("failed loading config file %q: %v", selectedConfig, err) } - - *logLevel = cfg.LogLevel - *logTraffic = cfg.LogTraffic - - *ltoudp = cfg.LToUDPEnabled - *ltIface = cfg.LToUDPInterface - *ltNet = cfg.LToUDPSeedNetwork - *ltZone = cfg.LToUDPSeedZone - - *tashtalkSerial = cfg.TashTalkPort - *ttNet = cfg.TashTalkSeedNetwork - *ttZone = cfg.TashTalkSeedZone - - *pcapDev = cfg.EtherTalkDevice - *etBackend = cfg.EtherTalkBackend - *pcapHWAddr = cfg.EtherTalkHWAddr - *etBridgeMode = cfg.EtherTalkBridgeMode - *etBridgeHostMAC = cfg.EtherTalkBridgeHostMAC - *etNetMin = cfg.EtherTalkSeedNetworkMin - *etNetMax = cfg.EtherTalkSeedNetworkMax - *etZone = cfg.EtherTalkSeedZone - - *macipEnable = cfg.MacIPEnabled - *macipGWIP = cfg.MacIPGWIP - *macipSubnet = cfg.MacIPSubnet - *macipNameserver = cfg.MacIPNameserver - *macipZone = cfg.MacIPZone - *macipIPGW = cfg.MacIPGatewayIP - *macipNAT = cfg.MacIPNAT - *macipDHCP = cfg.MacIPDHCPRelay - *macipStateFile = cfg.MacIPLeaseFile - - *parsePackets = cfg.ParsePackets - *parseOutput = cfg.ParseOutput - - *afpServerName = cfg.AFPServerName - *afpZone = cfg.AFPZone - *afpProtocols = cfg.AFPProtocols - *afpTCPAddr = cfg.AFPTCPBinding - *afpExtensionMap 
= cfg.AFPExtensionMapPath - *afpDecomposedFilenames = cfg.AFPDecomposedFilenames - *afpCNIDBackend = cfg.AFPCNIDBackend - afpVolumes = volumeFlags(cfg.AFPVolumes) - } - - if level, ok := netlog.ParseLevel(*logLevel); ok { + cfg = loaded + configSource = src + } else { + cfg = flagsToConfig(flagInputs{ + LogLevel: *logLevel, + LogTraffic: *logTraffic, + ParsePackets: *parsePackets, + ParseOutput: *parseOutput, + LToUDPEnabled: *ltoudp, + LToUDPInterface: *ltIface, + LToUDPSeedNetwork: *ltNet, + LToUDPSeedZone: *ltZone, + TashTalkPort: *tashtalkSerial, + TashTalkSeedNetwork: *ttNet, + TashTalkSeedZone: *ttZone, + EtherTalkDevice: *pcapDev, + EtherTalkBackend: *etBackend, + EtherTalkHWAddress: *pcapHWAddr, + EtherTalkBridgeMode: *etBridgeMode, + EtherTalkBridgeHostMAC: *etBridgeHostMAC, + EtherTalkSeedNetworkMin: *etNetMin, + EtherTalkSeedNetworkMax: *etNetMax, + EtherTalkSeedZone: *etZone, + EtherTalkDesiredNetwork: *etDesiredNet, + EtherTalkDesiredNode: *etDesiredNode, + MacIPEnabled: *macipEnable, + MacIPGWIP: *macipGWIP, + MacIPSubnet: *macipSubnet, + MacIPNameserver: *macipNameserver, + MacIPZone: *macipZone, + MacIPGatewayIP: *macipIPGW, + MacIPNAT: *macipNAT, + MacIPDHCPRelay: *macipDHCP, + MacIPLeaseFile: *macipStateFile, + }) + } + + if level, ok := netlog.ParseLevel(cfg.LogLevel); ok { netlog.SetLevel(level) } else { - log.Fatalf("unknown -log-level %q (want debug, info, or warn)", *logLevel) + log.Fatalf("unknown -log-level %q (want debug, info, or warn)", cfg.LogLevel) } - if *logTraffic { + // Install a pkg/logging root logger as the netlog shim's target so + // output flows through slog with source tagging and structured + // attributes. Each service will eventually take a *slog.Logger + // directly; until then, netlog.* calls forward here. 
+ slogLevel, _ := logging.ParseLevel(cfg.LogLevel) + rootLogger := logging.New("OmniTalk", logging.Options{ + Sinks: []logging.Sink{{Writer: os.Stderr, Format: logging.FormatConsole, Level: slogLevel}}, + }) + logging.SetDefault(rootLogger) + netlog.SetLogger(rootLogger) + + if cfg.LogTraffic { netlog.SetLogFunc(func(s string) { netlog.Debug("%s", s) }) } - *etBackend = strings.ToLower(strings.TrimSpace(*etBackend)) - switch *etBackend { + cfg.EtherTalk.Backend = strings.ToLower(strings.TrimSpace(cfg.EtherTalk.Backend)) + switch cfg.EtherTalk.Backend { case "", "pcap", "tap", "tun": default: - log.Fatalf("invalid -ethertalk-backend %q (want pcap, tap, or tun)", *etBackend) + log.Fatalf("invalid -ethertalk-backend %q (want pcap, tap, or tun)", cfg.EtherTalk.Backend) } if *listPcap { @@ -213,54 +221,59 @@ func main() { return } - if *pcapDev == "" && *etBackend == "pcap" { + if cfg.EtherTalk.Device == "" && cfg.EtherTalk.Backend == "pcap" { if detected, ok := rawlink.DetectDefaultPcapInterface(); ok { netlog.Info("[MAIN] auto-detected pcap interface: %s", detected) - *pcapDev = detected + cfg.EtherTalk.Device = detected } } - if *pcapDev != "" && *etBackend == "pcap" && strings.TrimSpace(*etBridgeHostMAC) == "" { - if hostMAC, ok := rawlink.DetectHostMACForPcapInterface(*pcapDev); ok { - *etBridgeHostMAC = hostMAC - netlog.Info("[MAIN] auto-detected bridge host MAC for %s: %s", *pcapDev, hostMAC) + if cfg.EtherTalk.Device != "" && cfg.EtherTalk.Backend == "pcap" && strings.TrimSpace(cfg.EtherTalk.BridgeHostMAC) == "" { + if hostMAC, ok := rawlink.DetectHostMACForPcapInterface(cfg.EtherTalk.Device); ok { + cfg.EtherTalk.BridgeHostMAC = hostMAC + netlog.Info("[MAIN] auto-detected bridge host MAC for %s: %s", cfg.EtherTalk.Device, hostMAC) } } var ports []port.Port - if *ltoudp { - ports = append(ports, localtalk.NewLtoudpPort(*ltIface, uint16(*ltNet), []byte(*ltZone))) + if cfg.LToUDP.Enabled { + ports = append(ports, localtalk.NewLtoudpPort(cfg.LToUDP.Interface, 
uint16(cfg.LToUDP.SeedNetwork), []byte(cfg.LToUDP.SeedZone))) } - if *tashtalkSerial != "" { - ports = append(ports, localtalk.NewTashTalkPort(*tashtalkSerial, uint16(*ttNet), []byte(*ttZone))) + if cfg.TashTalk.Port != "" { + ports = append(ports, localtalk.NewTashTalkPort(cfg.TashTalk.Port, uint16(cfg.TashTalk.SeedNetwork), []byte(cfg.TashTalk.SeedZone))) } - if *pcapDev != "" { - hwAddr, err := parseMAC(*pcapHWAddr) + if cfg.EtherTalk.Device != "" { + hwAddr, err := hwaddr.ParseEthernet(cfg.EtherTalk.HWAddress) if err != nil { log.Fatalf("invalid -ethertalk-hw-address: %v", err) } - var ep *ethertalk.PcapPort - switch *etBackend { + opts := ethertalk.Options{ + InterfaceName: cfg.EtherTalk.Device, + HWAddr: hwAddr.Bytes(), + SeedNetworkMin: uint16(cfg.EtherTalk.SeedNetworkMin), + SeedNetworkMax: uint16(cfg.EtherTalk.SeedNetworkMax), + DesiredNetwork: uint16(cfg.EtherTalk.DesiredNetwork), + DesiredNode: uint8(cfg.EtherTalk.DesiredNode), + SeedZoneNames: [][]byte{[]byte(cfg.EtherTalk.SeedZone)}, + BridgeMode: cfg.EtherTalk.BridgeMode, + } + if cfg.EtherTalk.BridgeHostMAC != "" { + hostMAC, err := hwaddr.ParseEthernet(cfg.EtherTalk.BridgeHostMAC) + if err != nil { + log.Fatalf("invalid -ethertalk-bridge-host-mac: %v", err) + } + opts.BridgeHostMAC = hostMAC.Bytes() + } + var ep port.Port + switch cfg.EtherTalk.Backend { case "", "pcap": - ep, err = ethertalk.NewPcapPort(*pcapDev, hwAddr, uint16(*etNetMin), uint16(*etNetMax), uint16(*etDesiredNet), uint8(*etDesiredNode), [][]byte{[]byte(*etZone)}) + ep, err = ethertalk.NewPcapPort(opts) case "tap", "tun": - ep, err = ethertalk.NewTapPort(*pcapDev, hwAddr, uint16(*etNetMin), uint16(*etNetMax), uint16(*etDesiredNet), uint8(*etDesiredNode), [][]byte{[]byte(*etZone)}) + ep, err = ethertalk.NewTapPort(opts) default: - log.Fatalf("unsupported EtherTalk backend: %q", *etBackend) + log.Fatalf("unsupported EtherTalk backend: %q", cfg.EtherTalk.Backend) } if err != nil { - log.Fatalf("failed creating EtherTalk port (%s): %v", 
*etBackend, err) - } - if err := ep.SetBridgeModeString(*etBridgeMode); err != nil { - log.Fatalf("invalid -ethertalk-bridge-mode: %v", err) - } - if *etBridgeHostMAC != "" { - hostMAC, err := parseMAC(*etBridgeHostMAC) - if err != nil { - log.Fatalf("invalid -ethertalk-bridge-host-mac: %v", err) - } - if err := ep.SetBridgeHostMAC(hostMAC); err != nil { - log.Fatalf("invalid -ethertalk-bridge-host-mac: %v", err) - } + log.Fatalf("failed creating EtherTalk port (%s): %v", cfg.EtherTalk.Backend, err) } ports = append(ports, ep) } @@ -282,196 +295,58 @@ func main() { zip.NewSendingService(), } - var macipSvc *macip.Service - - if *macipEnable { - if *etBackend != "" && *etBackend != "pcap" { - log.Fatalf("-macip-enabled currently requires -ethertalk-backend pcap (got %q)", *etBackend) - } - - // MacIP shares the EtherTalk pcap interface; fall back to auto-detection. - ipIface := *pcapDev - if ipIface == "" { - if detected, ok := rawlink.DetectDefaultPcapInterface(); ok { - ipIface = detected - netlog.Info("[MAIN][MacIP] auto-detected pcap interface: %s", detected) - } else { - log.Fatal("-ethertalk-device is required when -macip-enabled is set (auto-detection failed)") - } - } - - // Auto-detect IP-side MAC from the bridge host MAC or the interface itself. 
- ipMACStr := "" - if strings.TrimSpace(*etBridgeHostMAC) != "" { - ipMACStr = *etBridgeHostMAC - netlog.Info("[MAIN][MacIP] using bridge host MAC for IP-side: %s", ipMACStr) - } else if hostMAC, ok := rawlink.DetectHostMACForPcapInterface(ipIface); ok { - ipMACStr = hostMAC - netlog.Info("[MAIN][MacIP] auto-detected IP-side MAC from %s: %s", ipIface, ipMACStr) - } else { - ipMACStr = *pcapHWAddr - } - - hostIPStr, hostIPDetected := detectPcapInterfaceIPv4(ipIface) - - if *macipIPGW == "" { - if gw, ok := rawlink.DetectDefaultGatewayForPcapInterface(ipIface); ok { - *macipIPGW = gw - netlog.Info("[MAIN][MacIP] auto-detected default gateway %s for interface %s", gw, ipIface) - } else if hostIPDetected { - *macipIPGW = hostIPStr - netlog.Warn("[MAIN][MacIP] default gateway auto-detection failed; falling back to interface IPv4 %s on %s", hostIPStr, ipIface) - } else { - log.Fatal("-macip-ip-gateway is required when -macip-enabled is set (auto-detection failed and no IPv4 address was found)") - } - } - - _, ipNet, err := net.ParseCIDR(*macipSubnet) - if err != nil { - log.Fatalf("invalid -macip-nat-subnet: %v", err) - } - ipMAC, err := parseMAC(ipMACStr) - if err != nil { - log.Fatalf("invalid IP-side MAC: %v", err) - } - ipGW := net.ParseIP(*macipIPGW).To4() - if ipGW == nil { - log.Fatalf("invalid -macip-ip-gateway: %q", *macipIPGW) - } - var hostIP net.IP - if hostIPDetected { - hostIP = net.ParseIP(hostIPStr).To4() - } - gwIP := resolveMacIPGatewayIP(*macipGWIP, ipNet, ipGW, *macipNAT) - if gwIP == nil { - log.Fatalf("invalid -macip-nat-gw: %q", *macipGWIP) - } - if !*macipNAT && strings.TrimSpace(*macipGWIP) != "" { - netlog.Info("[MAIN][MacIP] ignoring -macip-nat-gw in non-NAT mode; using upstream gateway %s", gwIP) - } else if !*macipNAT { - netlog.Info("[MAIN][MacIP] using upstream gateway %s in non-NAT mode", gwIP) - } - if *macipNAT && gwIP.Equal(ipGW) { - log.Fatalf("invalid MacIP configuration: -macip-nat-gw (%s) conflicts with the host-side upstream 
gateway (%s); choose a different MacIP gateway IP", gwIP, ipGW) - } - nsIP := ipGW // default: physical gateway typically also serves DNS - if *macipNameserver != "" { - nsIP = net.ParseIP(*macipNameserver).To4() - if nsIP == nil { - log.Fatalf("invalid -macip-nameserver: %q", *macipNameserver) - } - } - - broadcast := broadcastAddr(ipNet) - // Choose the NBP zone: explicit -macip-zone wins, then EtherTalk seed zone, - // otherwise leave empty so the service picks the first zone found at start. - var chosenZone []byte - if *macipZone != "" { - chosenZone = []byte(*macipZone) - } else if *etZone != "" { - chosenZone = []byte(*etZone) - } - - // Open MacIP rawlink and apply BPF filter before injecting into the service. - ipLink, err := rawlink.OpenPcap(rawlink.DefaultMacIPConfig(ipIface)) - if err != nil { - log.Fatalf("failed opening MacIP rawlink on %s: %v", ipIface, err) - } - if fl, ok := ipLink.(rawlink.FilterableLink); ok { - if err := fl.SetFilter(macipBPFFilter(ipNet, *macipDHCP)); err != nil { - netlog.Warn("[MAIN][MacIP] could not set BPF filter on %s: %v", ipIface, err) - } - } - - macipSvc = macip.New( - gwIP, ipNet.IP, ipNet.Mask, - nsIP, broadcast, - chosenZone, - nbpSvc, - ipLink, ipMAC, hostIP, ipGW, - *macipNAT, - *macipDHCP, - *macipStateFile, - ) - services = append(services, macipSvc) - netlog.Info("[MAIN][MacIP] gw=%s subnet=%s iface=%s host-ip=%s ip-gw=%s zone=%q nat=%t dhcp_relay=%t", - gwIP, *macipSubnet, ipIface, hostIP, ipGW, string(chosenZone), *macipNAT, *macipDHCP) - } - - if len(afpVolumes) > 0 { - var transports []afp.Transport - var extMap *afp.ExtensionMap - if *afpExtensionMap != "" { - loadedMap, err := loadAFPExtensionMap(*afpExtensionMap) - if err != nil { - log.Fatalf("failed loading AFP extension map %q: %v", *afpExtensionMap, err) - } - extMap = loadedMap - } - - protocols := strings.Split(*afpProtocols, ",") - hasDDP := false - hasTCP := false - for _, p := range protocols { - p = strings.TrimSpace(p) - if strings.EqualFold(p, 
"ddp") { - hasDDP = true - } else if strings.EqualFold(p, "tcp") { - hasTCP = true - } - } - - if hasDDP { - aspSvc := asp.New(*afpServerName, nil, nbpSvc, []byte(*afpZone)) - if macipSvc != nil { - aspSvc.SetSessionLifecycleHooks( - func(sess *asp.Session) { - macipSvc.PinLeaseToSession(sess.WSNet, sess.WSNode, sess.ID) - }, - func(sess *asp.Session) { - macipSvc.UnpinLeaseFromSession(sess.ID) - }, - func(sess *asp.Session) { - macipSvc.MarkSessionActivity(sess.ID) - }, - ) - } - transports = append(transports, aspSvc) - netlog.Info("[MAIN][AFP] enabled DDP transport on socket %d", asp.ServerSocket) - } - - if hasTCP { - dsiSvc := dsi.NewServer(*afpServerName, *afpTCPAddr, nil) - transports = append(transports, dsiSvc) - netlog.Info("[MAIN][AFP] enabled TCP transport on %s", *afpTCPAddr) - } - - afpSvc := afp.NewAFPService( - *afpServerName, - []afp.VolumeConfig(afpVolumes), - &afp.LocalFileSystem{}, - transports, - afp.AFPOptions{DecomposedFilenames: *afpDecomposedFilenames, CNIDBackend: *afpCNIDBackend, AppleDoubleMode: parseAppleDoubleMode(*afpAppleDoubleMode), ExtensionMap: extMap}, - ) - - // Wire up the circular dependencies for handlers - for _, t := range transports { - switch transport := t.(type) { - case *asp.Service: - transport.SetCommandHandler(afpSvc) - case *dsi.Server: - transport.SetCommandHandler(afpSvc) - } - } - - services = append(services, afpSvc) - netlog.Info("[MAIN][AFP] server=%q volumes=%d zone=%q protocols=%q", *afpServerName, len(afpVolumes), *afpZone, *afpProtocols) + macIP, err := wireMacIP(MacIPConfig{ + Enabled: cfg.MacIPEnabled, + NATGatewayIP: cfg.MacIPGWIP, + NATSubnet: cfg.MacIPSubnet, + Nameserver: cfg.MacIPNameserver, + Zone: cfg.MacIPZone, + IPGateway: cfg.MacIPGatewayIP, + NAT: cfg.MacIPNAT, + DHCPRelay: cfg.MacIPDHCPRelay, + StateFile: cfg.MacIPLeaseFile, + PcapDevice: cfg.EtherTalk.Device, + BridgeHostMAC: cfg.EtherTalk.BridgeHostMAC, + PcapHWAddr: cfg.EtherTalk.HWAddress, + EtherTalkZone: cfg.EtherTalk.SeedZone, + 
EtherTalkBackend: cfg.EtherTalk.Backend, + NBP: nbpSvc, + }) + if err != nil { + log.Fatalf("MacIP wiring failed: %v", err) + } + if macIP != nil { + services = append(services, macIP.Service()) + } + + afpHook, err := wireAFP(AFPWiring{ + Source: configSource, + FromConfig: fromConfigFile, + NBP: nbpSvc, + Flags: AFPFlagInputs{ + ServerName: *afpServerName, + Zone: *afpZone, + Protocols: *afpProtocols, + TCPAddr: *afpTCPAddr, + ExtensionMap: *afpExtensionMap, + DecomposedNames: *afpDecomposedFilenames, + CNIDBackend: *afpCNIDBackend, + AppleDoubleMode: *afpAppleDoubleMode, + VolumeFlagValues: []string(afpVolumes), + }, + }) + if err != nil { + log.Fatalf("AFP wiring failed: %v", err) + } + if macIP != nil { + afpHook.AttachMacIP(macIPAFPHooks{macIP}) } + services = append(services, afpHook.Services()...) r := router.New("router", ports, services) - if *parsePackets { - dumper, cleanup, err := newPacketDumper(*parseOutput) + if cfg.ParsePackets { + dumper, cleanup, err := newPacketDumper(cfg.ParseOutput) if err != nil { log.Fatalf("parse-packets: %v", err) } @@ -481,17 +356,18 @@ func main() { aware.SetPacketDumper(dumper) } } - netlog.Info("[MAIN] parse-packets enabled; output=%q", *parseOutput) + netlog.Info("[MAIN] parse-packets enabled; output=%q", cfg.ParseOutput) } - if err := r.Start(); err != nil { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer stop() + + if err := r.Start(ctx); err != nil { log.Fatalf("failed to start router: %v", err) } netlog.Info("[MAIN] router away!") - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGTERM) - <-sig + <-ctx.Done() if err := r.Stop(); err != nil { netlog.Warn("[MAIN] stop warning: %v", err) @@ -508,41 +384,19 @@ func broadcastAddr(n *net.IPNet) net.IP { return bcast } -// volumeFlags is a repeatable -afp-volume flag. -type volumeFlags []afp.VolumeConfig +// volumeFlags is a repeatable -afp-volume flag. 
The raw "Name:Path" +// strings are forwarded to wireAFP, where the //go:build afp side +// parses them via afp.ParseVolumeFlag. Keeping this neutral lets +// minimal-build users still pass -afp-volume and get a clean warning. +type volumeFlags []string func (v *volumeFlags) String() string { return "" } func (v *volumeFlags) Set(s string) error { - cfg, err := afp.ParseVolumeFlag(s) - if err != nil { - return err - } - *v = append(*v, cfg) + *v = append(*v, s) return nil } -func parseMAC(s string) ([]byte, error) { - normalized := strings.ReplaceAll(strings.ReplaceAll(strings.TrimSpace(s), ":", ""), "-", "") - if len(normalized) != 12 { - return nil, fmt.Errorf("want 12 hex digits, got %d", len(normalized)) - } - b, err := hex.DecodeString(normalized) - if err != nil { - return nil, err - } - return b, nil -} - -func parseAppleDoubleMode(mode string) afp.AppleDoubleMode { - switch strings.ToLower(strings.TrimSpace(mode)) { - case "legacy", string(afp.AppleDoubleModeLegacy): - return afp.AppleDoubleModeLegacy - default: - return afp.AppleDoubleModeModern - } -} - func detectPcapInterfaceIPv4(interfaceName string) (string, bool) { if strings.TrimSpace(interfaceName) == "" { return "", false @@ -586,24 +440,6 @@ func selectPreferredIPv4(addrs []string) (string, bool) { return "", false } -func resolveMacIPGatewayIP(configured string, natSubnet *net.IPNet, upstreamGateway net.IP, natMode bool) net.IP { - if !natMode { - return append(net.IP(nil), upstreamGateway.To4()...) 
- } - trimmed := strings.TrimSpace(configured) - if trimmed != "" { - return net.ParseIP(trimmed).To4() - } - return firstUsableIPv4(natSubnet) -} - -func macipBPFFilter(ipNet *net.IPNet, dhcpMode bool) string { - if dhcpMode { - return "(arp) or (ip) or (udp dst port 68)" - } - return fmt.Sprintf("(arp) or (dst net %s)", ipNet.String()) -} - func firstUsableIPv4(n *net.IPNet) net.IP { if n == nil { return nil diff --git a/cmd/omnitalk/packetdump.go b/cmd/omnitalk/packetdump.go index ff6ba7f..df1e1b5 100644 --- a/cmd/omnitalk/packetdump.go +++ b/cmd/omnitalk/packetdump.go @@ -6,7 +6,7 @@ import ( "log" "os" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/service" ) // PacketDumper is a generic sink used by services to emit parsed packet logs. diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..9f75994 --- /dev/null +++ b/config/config.go @@ -0,0 +1,43 @@ +// Package config abstracts where OmniTalk's configuration comes from +// (TOML file today; environment variables, JSON, etc. tomorrow). It owns +// no schema knowledge: each component decides what keys it consumes by +// reading from the returned koanf instance. +// +// Defaults live with the consumers (typically as flag defaults in +// cmd/omnitalk). The config package's only job is to surface a populated +// koanf source to those consumers. +package config + +import ( + "path/filepath" + + "github.com/knadh/koanf/parsers/toml/v2" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/v2" +) + +// Source is a parsed configuration source. Components read keys from K +// using their own schema. ConfigDir is the directory of the source file +// (or "" when no file backed the source) and is useful for resolving +// paths declared relative to the config file. 
+type Source struct { + K *koanf.Koanf + ConfigDir string +} + +// Empty returns a Source backed by an empty koanf instance — useful when +// no config file is present and consumers should fall back entirely to +// flag defaults. +func Empty() Source { + return Source{K: koanf.New("."), ConfigDir: ""} +} + +// Load parses path as TOML and returns a Source. The koanf delimiter is +// "." so nested tables (e.g. [Volumes.Default]) become "Volumes.Default". +func Load(path string) (Source, error) { + k := koanf.New(".") + if err := k.Load(file.Provider(path), toml.Parser()); err != nil { + return Source{K: k}, err + } + return Source{K: k, ConfigDir: filepath.Dir(path)}, nil +} diff --git a/config/loadtoml_test.go b/config/loadtoml_test.go new file mode 100644 index 0000000..ffce6e8 --- /dev/null +++ b/config/loadtoml_test.go @@ -0,0 +1,19 @@ +package config + +import "testing" + +// TestLoad_ExampleFile loads the canonical server.toml.example from the +// repo root to make sure the parser still accepts the shipped example. +// Schema-level checks live with the consumers (e.g. service/afp). 
+func TestLoad_ExampleFile(t *testing.T) { + src, err := Load("../server.toml.example") + if err != nil { + t.Fatalf("Load(server.toml.example): %v", err) + } + if got := src.K.String("AFP.name"); got != "OmniTalk" { + t.Fatalf("AFP.name = %q, want %q", got, "OmniTalk") + } + if vols := src.K.MapKeys("AFP.Volumes"); len(vols) != 2 { + t.Fatalf("AFP.Volumes = %d, want 2", len(vols)) + } +} diff --git a/dist/Sample Volume/_.afp.db b/dist/Sample Volume/_.afp.db index 95f8349..116a197 100644 Binary files a/dist/Sample Volume/_.afp.db and b/dist/Sample Volume/_.afp.db differ diff --git a/dist/server.ini b/dist/server.ini deleted file mode 100644 index 8ec6fdb..0000000 --- a/dist/server.ini +++ /dev/null @@ -1,65 +0,0 @@ -[LToUdp] -; LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) -enabled = true ; Enable LToUDP - true for on, false for off -seed_network = 1 ; LToUDO seed network number -seed_zone = "LToUDP Network" ; LToUDP seed zone name. - -[TashTalk] -; TashTalk is a PIC-based RS482 localtalk to serial adaptor -port = ; blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) -seed_network = 2 ; TashTalk seed network number -seed_zone = "TashTalk Network" ; TashTalk seed zone name - -[EtherTalk] -; Ethertalk is a pcap based Network Bridge -backend = pcap ; supported: pcap, tap, tun. Leave blank to disable ethertalk. -device = "" ; PCap device name. Call with -list-pcap-devices to see what to use. Linux /dev/eth0. Windows: "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}". -hw_address = "DE:AD:BE:EF:CA:FE" ; EtherTalk Hardware Address to use for router. -seed_network_min = 3 ; EtherTalk seed network number -seed_network_max = 5 ; EtherTalk seed network -seed_zone = "EtherTalk Network" ; EtherTalk seed zone name -bridge_mode = auto ; auto (default), ethernet, or wifi. Use wifi for bridge-shim rewriting on Wi-Fi adapters. -bridge_host_mac = ; optional host adapter MAC for Wi-Fi bridge shim. Defaults to hw_address when blank. 
- - -[MacIP] -; MacIP Gateway Settings. Allows TCP over DDP. -enabled = true ; true to enable MacIP Gateway, false to disable -mode = pcap ; modes are pcap or nat. -zone = ; MacIP Gateway Zone, defaults to EtherTalk zone, otherwise the first zone detected. -nat_subnet = ; in NAT mode, the subnet to use (eg 192.168.100.0/24) -nat_gw = ; in NAT mode, the IP Address to use for the gateway (eg 192.168.100.1) -lease_file = leases.txt ; in NAT mode, persist DHCP leases to the specified file -ip_gateway = ; Upstream/default gateway on the IP-side network -dhcp_relay = true ; DHCP Relay, converts MacTCP Auto Config to DHCP requests -nameserver = 1.1.1.1 ; Name server for DNS - - -[AFP] -; Apple Filing Protocol Server Settings -enabled = true ; true to enable AFP Server, false to disable -name = "OmniTalk" ; Name of the server to use. Max length of 31 characters. -zone = "EtherTalk Network" ; Name of the AppleTalk Zone to list the server in -protocols = ddp,tcp ; Protocols to use. Supports ddp (AppleTalk) and tcp (TCP/IP). They can be combined (eg ddp,tcp) -binding = ":548" ; When TCP is enabled, the IP+Port to bind the service to. -extension_map = "extmap.conf" ; Netatalk compatible extension mapping file - -; AFP Volume Configuration. Each volume must have a section for this. -[Volumes.Default] -name = "Welcome" ; Volume Name. Max Length of 31 characters. -path = "./Sample Volume" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -read_only = true ; When true, the volume will be advertised as read-only and write operations will be rejected. Default is false. -rebuild_desktop_db = true ; When true, rebuilds the desktop database from resource forks. Default is false. - -[Volumes.Shared] -name = "Shared" ; Volume Name. Max Length of 31 characters. -path = "./shared" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -rebuild_desktop_db = false ; When true, rebuilds the desktop database from resource forks. Default is false. 
- -[Logging] -level = warn -parse_packets = false -log_traffic = false - - - diff --git a/dist/server.toml b/dist/server.toml new file mode 100644 index 0000000..b221dc0 --- /dev/null +++ b/dist/server.toml @@ -0,0 +1,59 @@ +[LToUdp] +# LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) +enabled = true # Enable LToUDP - true for on, false for off +seed_network = 1 # LToUDP seed network number +seed_zone = "LToUDP Network" + +[TashTalk] +# TashTalk is a PIC-based RS422 LocalTalk to serial adaptor +port = "" # blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) +seed_network = 2 +seed_zone = "TashTalk Network" + +[EtherTalk] +# EtherTalk is a pcap-based network bridge +backend = "pcap" # supported: pcap, tap, tun. Leave blank to disable. +device = "" # PCap device name. Use -list-pcap-devices to see candidates. +hw_address = "DE:AD:BE:EF:CA:FE" +seed_network_min = 3 +seed_network_max = 5 +seed_zone = "EtherTalk Network" +bridge_mode = "auto" # auto (default), ethernet, or wifi +bridge_host_mac = "" + +[MacIP] +# MacIP Gateway Settings. Allows TCP over DDP. +enabled = true +mode = "pcap" # pcap or nat +zone = "" +nat_subnet = "" +nat_gw = "" +lease_file = "leases.txt" +ip_gateway = "" +dhcp_relay = true +nameserver = "1.1.1.1" + +[AFP] +enabled = true +name = "OmniTalk" +zone = "EtherTalk Network" +protocols = "ddp,tcp" +binding = ":548" +extension_map = "extmap.conf" + +# AFP Volume Configuration — each volume gets an [AFP.Volumes.] section. +[AFP.Volumes.Default] +name = "Welcome" +path = "./Sample Volume" +read_only = true +rebuild_desktop_db = true + +[AFP.Volumes.Shared] +name = "Shared" +path = "./shared" +rebuild_desktop_db = false + +[Logging] +level = "warn" +parse_packets = false +log_traffic = false diff --git a/extmap.conf b/extmap.conf index 175f74f..db45dab 100644 --- a/extmap.conf +++ b/extmap.conf @@ -9,7 +9,7 @@ ##. "BINA" "UNIX" Unix Binary Unix application/octet-stream ##. 
"TEXT" "ttxt" ASCII Text SimpleText text/plain -#.1st "TEXT" "ttxt" Text Readme SimpleText application/text +.1st "TEXT" "ttxt" Text Readme SimpleText application/text #.669 "6669" "SNPL" 669 MOD Music PlayerPro #.8med "STrk" "SCPL" Amiga OctaMed music SoundApp #.8svx "8SVX" "SCPL" Amiga 8-bit sound SoundApp @@ -80,7 +80,7 @@ #.for "TEXT" "MPS " Fortran Source MPW Shell #.fts "FITS" "GKON" Flexible Image Transport GraphicConverter #.gem "GEM-" "GKON" GEM Metafile GraphicConverter -#.gif "GIFf" "ogle" GIF Picture PictureViewer image/gif +.gif "GIFf" "ogle" GIF Picture PictureViewer image/gif #.gl "GL " "AnVw" GL Animation MacAnim Viewer #.grp "GRPp" "GKON" GRP Image GraphicConverter #.gz "SIT!" "SITx" Gnu ZIP Archive StuffIt Expander application/x-gzip @@ -90,8 +90,8 @@ #.hpgl "HPGL" "GKON" HP GL/2 GraphicConverter #.hpp "TEXT" "CWIE" C Include File CodeWarrior .hqx "TEXT" "SITx" BinHex StuffIt Expander application/mac-binhex40 -#.htm "TEXT" "MOSS" HyperText Netscape Communicator text/html -#.html "TEXT" "MOSS" HyperText Netscape Communicator text/html +.htm "TEXT" "MOSS" HyperText Netscape Communicator text/html +.html "TEXT" "MOSS" HyperText Netscape Communicator text/html #.i3 "TEXT" "R*ch" Modula 3 Interface BBEdit #.ic1 "IMAG" "GKON" Atari Image GraphicConverter #.ic2 "IMAG" "GKON" Atari Image GraphicConverter @@ -106,9 +106,9 @@ #.ini "TEXT" "ttxt" Windows INI File SimpleText #.java "TEXT" "CWIE" Java Source File CodeWarrior #.jfif "JPEG" "ogle" JFIF Image PictureViewer -#.jpe "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg -#.jpeg "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg -#.jpg "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg +.jpe "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg +.jpeg "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg +.jpg "JPEG" "ogle" JPEG Picture PictureViewer image/jpeg #.latex "TEXT" "OTEX" Latex OzTex application/x-latex #.lbm "ILBM" "GKON" Amiga IFF Image GraphicConverter #.lha "LHA " "SITx" LHArc Archive 
StuffIt Expander @@ -133,7 +133,7 @@ #.mod "STrk" "SCPL" MOD Music SoundApp #.mol "TEXT" "RSML" MDL Molfile RasMac #.moov "MooV" "TVOD" QuickTime Movie MoviePlayer video/quicktime -#.mov "MooV" "TVOD" QuickTime Movie MoviePlayer video/quicktime +.mov "MooV" "TVOD" QuickTime Movie MoviePlayer video/quicktime #.mp2 "MPEG" "TVOD" MPEG-1 audiostream MoviePlayer audio/x-mpeg #.mp3 "MPG3" "TVOD" MPEG-3 audiostream MoviePlayer audio/x-mpeg #.mpa "MPEG" "TVOD" MPEG-1 audiostream MoviePlayer audio/x-mpeg @@ -163,15 +163,15 @@ #.pct "PICT" "ogle" PICT Picture PictureViewer image/x-pict #.pcx "PCXx" "GKON" PC PaintBrush GraphicConverter #.pdb "TEXT" "RSML" Brookhaven PDB file RasMac -#.pdf "PDF " "CARO" Portable Document Format Acrobat Reader application/pdf +.pdf "PDF " "CARO" Portable Document Format Acrobat Reader application/pdf #.pdx "TEXT" "ALD5" Printer Description PageMaker #.pf "CSIT" "SITx" Private File StuffIt Expander #.pgm "PPGM" "GKON" Portable Graymap GraphicConverter image/x-portable-graymap #.pi1 "Dega" "GKON" Atari Degas Image GraphicConverter #.pi2 "Dega" "GKON" Atari Degas Image GraphicConverter #.pi3 "Dega" "GKON" Atari Degas Image GraphicConverter -#.pic "PICT" "ogle" PICT Picture PictureViewer image/x-pict -#.pict "PICT" "ogle" PICT Picture PictureViewer image/x-macpict +.pic "PICT" "ogle" PICT Picture PictureViewer image/x-pict +.pict "PICT" "ogle" PICT Picture PictureViewer image/x-macpict #.pit "PIT " "SITx" PackIt Archive StuffIt Expander #.pkg "HBSF" "SITx" AppleLink Package StuffIt Expander #.pl "TEXT" "McPL" Perl Source MacPerl @@ -180,7 +180,7 @@ #.pm3 "ALB3" "ALD3" PageMaker 3 Document PageMaker #.pm4 "ALB4" "ALD4" PageMaker 4 Document PageMaker #.pm5 "ALB5" "ALD5" PageMaker 5 Document PageMaker -#.png "PNG " "ogle" Portable Network Graphic PictureViewer +.png "PNG " "ogle" Portable Network Graphic PictureViewer #.pntg "PNTG" "ogle" Macintosh Painting PictureViewer #.ppd "TEXT" "ALD5" Printer Description PageMaker #.ppm "PPGM" "GKON" Portable 
Pixmap GraphicConverter image/x-portable-pixmap @@ -191,11 +191,11 @@ #.pt5 "ALT5" "ALD5" PageMaker 5 Template PageMaker #.pxr "PXR " "8BIM" Pixar Image Photoshop #.qdv "QDVf" "GKON" QDV image GraphicConverter -#.qt "MooV" "TVOD" QuickTime Movie MoviePlayer video/quicktime +.qt "MooV" "TVOD" QuickTime Movie MoviePlayer video/quicktime #.qxd "XDOC" "XPR3" QuarkXpress Document QuarkXpress #.qxt "XTMP" "XPR3" QuarkXpress Template QuarkXpress #.raw "BINA" "GKON" Raw Image GraphicConverter -#.readme "TEXT" "ttxt" Text Readme SimpleText application/text +.readme "TEXT" "ttxt" Text Readme SimpleText application/text #.rgb "SGI " "GKON" SGI Image GraphicConverter image/x-rgb #.rgba "SGI " "GKON" SGI Image GraphicConverter image/x-rgb #.rib "TEXT" "RINI" Renderman 3D Data Renderman @@ -214,7 +214,7 @@ #.scp "RIX3" "GKON" ColoRIX GraphicConverter #.scr "RIX3" "GKON" ColoRIX GraphicConverter #.scu "RIX3" "GKON" ColoRIX GraphicConverter -#.sea "APPL" "????" Self-Extracting Archive Self Extracting Archive +.sea "APPL" "????" 
Self-Extracting Archive Self Extracting Archive #.sf "IRCM" "SDHK" IRCAM Sound SoundHack #.sgi ".SGI" "ogle" SGI Image PictureViewer #.sha "TEXT" "UnSh" Unix Shell Archive UnShar application/x-shar diff --git a/go.mod b/go.mod index 08f6e0f..5c63d15 100644 --- a/go.mod +++ b/go.mod @@ -1,41 +1,51 @@ -module github.com/pgodw/omnitalk/go +module github.com/pgodw/omnitalk go 1.23.0 toolchain go1.23.4 require ( + github.com/PuerkitoBio/goquery v1.10.0 github.com/google/gopacket v1.1.19 - github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 + github.com/jacobsa/go-serial v0.0.0-20180131005756-15cf729a72d4 + github.com/knadh/koanf/parsers/toml/v2 v2.2.0 + github.com/knadh/koanf/providers/file v1.2.1 + github.com/knadh/koanf/v2 v2.3.4 golang.org/x/net v0.33.0 - gopkg.in/ini.v1 v1.67.1 + golang.org/x/sys v0.32.0 + modernc.org/sqlite v1.35.0 + tailscale.com v1.64.2 ) require ( github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect + github.com/andybalholm/cascadia v1.3.2 // indirect github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/jacobsa/go-serial v0.0.0-20180131005756-15cf729a72d4 // indirect github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect github.com/jsimonetti/rtnetlink v1.4.0 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.5.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/remyoudompheng/bigfft 
v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/stretchr/testify v1.11.1 // indirect go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.31.0 // indirect golang.org/x/text v0.21.0 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect modernc.org/libc v1.61.13 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.8.2 // indirect - modernc.org/sqlite v1.35.0 // indirect - tailscale.com v1.64.2 // indirect ) diff --git a/go.sum b/go.sum index fed5a79..3f2ea6a 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,29 @@ +github.com/PuerkitoBio/goquery v1.10.0 h1:6fiXdLuUvYs2OJSvNRqlNPoBm6YABE226xrbavY5Wv4= +github.com/PuerkitoBio/goquery v1.10.0/go.mod h1:TjZZl68Q3eGHNBA8CWaxAN7rOU1EbDz3CWuolcO5Yu4= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod 
h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jacobsa/go-serial v0.0.0-20180131005756-15cf729a72d4 h1:G2ztCwXov8mRvP0ZfjE6nAlaCX2XbykaeHdbT6KwDz0= @@ -19,74 +32,130 @@ github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0 github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod 
h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/parsers/toml/v2 v2.2.0 h1:2nV7tHYJ5OZy2BynQ4mOJ6k5bDqbbCzRERLUKBytz3A= +github.com/knadh/koanf/parsers/toml/v2 v2.2.0/go.mod h1:JpjTeK1Ge1hVX0wbof5DMCuDBriR8bWgeQP98eeOZpI= +github.com/knadh/koanf/providers/file v1.2.1 h1:bEWbtQwYrA+W2DtdBrQWyXqJaJSG3KrP3AESOJYp9wM= +github.com/knadh/koanf/providers/file v1.2.1/go.mod h1:bp1PM5f83Q+TOUu10J/0ApLBd9uIzg+n9UgthfY+nRA= +github.com/knadh/koanf/v2 v2.3.4 h1:fnynNSDlujWE+v83hAp8wKr/cdoxHLO0629SN+U8Urc= +github.com/knadh/koanf/v2 v2.3.4/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= 
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k= -gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0= +modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo= +modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw= +modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8= modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI= modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= 
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= modernc.org/sqlite v1.35.0 h1:yQps4fegMnZFdphtzlfQTCNBWtS0CZv48pRpW3RFHRw= modernc.org/sqlite v1.35.0/go.mod h1:9cr2sicr7jIaWTBKQmAxQLfBv9LL0su4ZTEV+utt3ic= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= tailscale.com v1.64.2 h1:0VNwUsjK6CwgkqyaOANndBER2SMYl8JZ5uNRTvIqCnY= tailscale.com v1.64.2/go.mod h1:6kGByHNxnFfK1i4gVpdtvpdS1HicHohWXnsfwmXy64I= diff --git a/internal/testutil/mock_port.go b/internal/testutil/mock_port.go new file mode 100644 index 0000000..dba2b4b --- /dev/null +++ b/internal/testutil/mock_port.go @@ -0,0 +1,60 @@ +// Package testutil provides shared test helpers used across OmniTalk's +// service and port packages. Live under internal/ so external consumers +// cannot depend on these mocks; only project tests may import. +package testutil + +import ( + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/protocol/ddp" +) + +// MockPort is a fake port.Port whose behaviour is driven by func fields. +// Leave any field nil and its method is unsafe to call; wire up only the +// behaviours the test needs. 
type MockPort struct {
	// Identity.
	ShortStringFunc func() string

	// Lifecycle.
	StartFunc func(router port.RouterHooks) error
	StopFunc  func() error

	// Transmit paths.
	UnicastFunc   func(network uint16, node uint8, datagram ddp.Datagram)
	BroadcastFunc func(datagram ddp.Datagram)
	MulticastFunc func(zoneName []byte, datagram ddp.Datagram)

	// Addressing and capability accessors.
	SetNetworkRangeFunc func(networkMin, networkMax uint16) error
	NetworkFunc         func() uint16
	NodeFunc            func() uint8
	NetworkMinFunc      func() uint16
	NetworkMaxFunc      func() uint16
	ExtendedNetworkFunc func() bool
}

// Each method below delegates straight to its corresponding *Func field
// with no nil guard: calling a method whose field was left unset panics
// (nil function call), so missing test wiring fails loudly at the call
// site rather than silently returning a zero value.

func (m *MockPort) ShortString() string { return m.ShortStringFunc() }

func (m *MockPort) Start(router port.RouterHooks) error { return m.StartFunc(router) }

func (m *MockPort) Stop() error { return m.StopFunc() }

func (m *MockPort) Unicast(network uint16, node uint8, datagram ddp.Datagram) {
	m.UnicastFunc(network, node, datagram)
}

func (m *MockPort) Broadcast(datagram ddp.Datagram) { m.BroadcastFunc(datagram) }

func (m *MockPort) Multicast(zoneName []byte, datagram ddp.Datagram) {
	m.MulticastFunc(zoneName, datagram)
}

func (m *MockPort) SetNetworkRange(networkMin, networkMax uint16) error {
	return m.SetNetworkRangeFunc(networkMin, networkMax)
}

func (m *MockPort) Network() uint16 { return m.NetworkFunc() }

func (m *MockPort) Node() uint8 { return m.NodeFunc() }

func (m *MockPort) NetworkMin() uint16 { return m.NetworkMinFunc() }

func (m *MockPort) NetworkMax() uint16 { return m.NetworkMaxFunc() }

func (m *MockPort) ExtendedNetwork() bool { return m.ExtendedNetworkFunc() }

// NewMockPort returns a MockPort pre-wired with common constant accessors
// (network, node, short string, extended flag). NetworkMin and NetworkMax
// both report the single supplied network. Call-time behaviours
// (Unicast/Broadcast/etc.) remain unset and must be supplied by the test.
func NewMockPort(network uint16, node uint8, shortString string, isExtended bool) *MockPort {
	return &MockPort{
		ShortStringFunc:     func() string { return shortString },
		NetworkFunc:         func() uint16 { return network },
		NodeFunc:            func() uint8 { return node },
		NetworkMinFunc:      func() uint16 { return network },
		NetworkMaxFunc:      func() uint16 { return network },
		ExtendedNetworkFunc: func() bool { return isExtended },
	}
}
diff --git a/internal/testutil/mock_router.go b/internal/testutil/mock_router.go
new file mode 100644
index 0000000..dac9925
--- /dev/null
+++ b/internal/testutil/mock_router.go
@@ -0,0 +1,63 @@
package testutil

import (
	"github.com/pgodw/omnitalk/port"
	"github.com/pgodw/omnitalk/protocol/ddp"
	"github.com/pgodw/omnitalk/service"
)

// MockRouter is a fake service.Router whose behaviour is driven by func
// fields. As with MockPort, methods delegate with no nil guard, so
// calling a method whose field is nil panics; wire up only what the
// test needs.
type MockRouter struct {
	RouteFunc               func(datagram ddp.Datagram, originating bool) error
	ReplyFunc               func(datagram ddp.Datagram, rxPort port.Port, ddpType uint8, data []byte)
	PortsListFunc           func() []port.Port
	RoutingGetByNetworkFunc func(network uint16) (*service.RouteEntry, *bool)
	RoutingEntriesFunc      func() []struct {
		Entry *service.RouteEntry
		Bad   bool
	}
	RoutingConsiderFunc     func(entry *service.RouteEntry) bool
	RoutingMarkBadFunc      func(networkMin, networkMax uint16) bool
	ZonesInNetworkRangeFunc func(networkMin uint16, networkMax *uint16) ([][]byte, error)
	NetworksInZoneFunc      func(zoneName []byte) []uint16
	ZonesFunc               func() [][]byte
	AddNetworksToZoneFunc   func(zoneName []byte, networkMin uint16, networkMax *uint16) error
	RoutingTableAgeFunc     func()
}

func (m *MockRouter) Route(datagram ddp.Datagram, originating bool) error {
	return m.RouteFunc(datagram, originating)
}

func (m *MockRouter) Reply(datagram ddp.Datagram, rxPort port.Port, ddpType uint8, data []byte) {
	m.ReplyFunc(datagram, rxPort, ddpType, data)
}

func (m *MockRouter) PortsList() []port.Port { return m.PortsListFunc() }

func (m *MockRouter) RoutingGetByNetwork(network uint16) (*service.RouteEntry, *bool) {
	return m.RoutingGetByNetworkFunc(network)
}

func (m *MockRouter) RoutingEntries() []struct {
	Entry *service.RouteEntry
	Bad   bool
} {
	return m.RoutingEntriesFunc()
}

func (m *MockRouter) RoutingConsider(entry *service.RouteEntry) bool {
	return m.RoutingConsiderFunc(entry)
}

func (m *MockRouter) RoutingMarkBad(networkMin, networkMax uint16) bool {
	return m.RoutingMarkBadFunc(networkMin, networkMax)
}

func (m *MockRouter) ZonesInNetworkRange(networkMin uint16, networkMax *uint16) ([][]byte, error) {
	return m.ZonesInNetworkRangeFunc(networkMin, networkMax)
}

func (m *MockRouter) NetworksInZone(zoneName []byte) []uint16 { return m.NetworksInZoneFunc(zoneName) }

func (m *MockRouter) Zones() [][]byte { return m.ZonesFunc() }

func (m *MockRouter) AddNetworksToZone(zoneName []byte, networkMin uint16, networkMax *uint16) error {
	return m.AddNetworksToZoneFunc(zoneName, networkMin, networkMax)
}

func (m *MockRouter) RoutingTableAge() { m.RoutingTableAgeFunc() }

// NewMockRouter returns a MockRouter with no behaviours wired up. Tests
// set the fields they need before use.
func NewMockRouter() *MockRouter { return &MockRouter{} }
diff --git a/netlog/netlog.go b/netlog/netlog.go
index 75a4fc3..a8b7261 100644
--- a/netlog/netlog.go
+++ b/netlog/netlog.go
@@ -1,22 +1,35 @@
-// Package netlog provides leveled logging and optional network traffic logging,
+// Package netlog is OmniTalk's logging API.
+//
+// It is a thin facade over log/slog: cmd/omnitalk constructs a structured
+// logger via pkg/logging and installs it here with SetLogger, then every
+// service calls Debug/Info/Warn from this package. The facade keeps call
+// sites short (no per-package logger plumbing) while still letting the
+// process-wide handler decide formatting (console vs JSON) and level.
+// +// Use this package for ordinary diagnostic logging. Use pkg/logging +// directly only when you need a *slog.Logger value (e.g. attaching +// structured fields with .With for the lifetime of an object). package netlog import ( + "context" "encoding/binary" "fmt" "log" + "log/slog" + "strings" "sync" - "github.com/pgodw/omnitalk/go/appletalk" + "github.com/pgodw/omnitalk/protocol/ddp" ) -// Level controls the minimum severity of messages that are emitted. +// Level mirrors the legacy three-value enum but maps onto slog.Level. type Level int const ( - LevelDebug Level = iota // log debug, info, and warn - LevelInfo // log info and warn (default) - LevelWarn // log warn only + LevelDebug Level = iota + LevelInfo + LevelWarn ) var ( @@ -24,18 +37,47 @@ var ( minLevel = LevelInfo ) -// SetLevel sets the minimum log level. Messages below this level are suppressed. -// Default is LevelInfo. +// logger is the slog instance the shim forwards through. It is +// deliberately separate from slog.Default(): netlog.SetLevel needs to +// gate Debug traffic without disturbing whatever handler the application +// has installed as the process-wide default. Callers that want +// structured output install a pkg/logging-built logger here via +// SetLogger; the zero value routes through slog.Default() with our own +// level gate out front. +var ( + loggerMu sync.RWMutex + logger *slog.Logger +) + +// SetLogger installs the logger that Debug/Info/Warn forward to. Passing +// nil reverts to slog.Default(). +func SetLogger(l *slog.Logger) { + loggerMu.Lock() + logger = l + loggerMu.Unlock() +} + +func activeLogger() *slog.Logger { + loggerMu.RLock() + l := logger + loggerMu.RUnlock() + if l != nil { + return l + } + return slog.Default() +} + +// SetLevel sets the minimum level. Kept for call-site compatibility; new +// code should configure pkg/logging sinks directly. 
func SetLevel(l Level) { levelMu.Lock() minLevel = l levelMu.Unlock() } -// ParseLevel converts a string ("debug", "info", "warn"/"warning") to a Level. -// Returns (level, true) on success or (LevelInfo, false) if unrecognised. +// ParseLevel accepts "debug" / "info" / "warn" / "warning". func ParseLevel(s string) (Level, bool) { - switch s { + switch strings.ToLower(strings.TrimSpace(s)) { case "debug": return LevelDebug, true case "info": @@ -53,39 +95,68 @@ func enabled(l Level) bool { return ok } -// Debug logs a debug-level message (suppressed unless level is LevelDebug). -func Debug(format string, args ...any) { - if enabled(LevelDebug) { - log.Printf("DEBUG "+format, args...) +func slogLevel(l Level) slog.Level { + switch l { + case LevelDebug: + return slog.LevelDebug + case LevelWarn: + return slog.LevelWarn + default: + return slog.LevelInfo } } -// Info logs an info-level message. -func Info(format string, args ...any) { - if enabled(LevelInfo) { - log.Printf("INFO "+format, args...) +// emit forwards to slog.Default(). Callers construct the root logger via +// pkg/logging and install it with logging.SetDefault; this shim simply +// adapts the legacy printf-style API onto slog. The netlog level gate +// remains so callers that call SetLevel(LevelDebug) still see debug lines +// even when slog.Default's handler is at Info — the shim uses +// slog.Log(level), which slog honours regardless of the handler's level +// as long as the handler is enabled at that level. +func emit(l Level, format string, args ...any) { + if !enabled(l) { + return } -} - -// Warn logs a warning-level message. -func Warn(format string, args ...any) { - if enabled(LevelWarn) { - log.Printf("WARN "+format, args...) + lg := activeLogger() + // When no custom logger is installed the shim falls back to stdlib + // log so the historical format (captured by tests via log.SetOutput) + // stays intact. 
As soon as main installs a pkg/logging logger via + // SetLogger, output shifts to the structured pipeline. + loggerMu.RLock() + custom := logger != nil + loggerMu.RUnlock() + if !custom { + var tag string + switch l { + case LevelDebug: + tag = "DEBUG " + case LevelWarn: + tag = "WARN " + default: + tag = "INFO " + } + log.Printf(tag+format, args...) + return } + lg.Log(context.Background(), slogLevel(l), fmt.Sprintf(format, args...)) } -// ShortStringer is implemented by ports that can provide a short description string. +// Debug / Info / Warn are the legacy entry points. They now route through +// slog.Default(); install a pkg/logging-constructed logger as default in +// main and you get structured output with source tags for free. +func Debug(format string, args ...any) { emit(LevelDebug, format, args...) } +func Info(format string, args ...any) { emit(LevelInfo, format, args...) } +func Warn(format string, args ...any) { emit(LevelWarn, format, args...) } + +// ShortStringer is implemented by ports that provide a short description. type ShortStringer interface { ShortString() string } -// LogFunc is a function that receives a single formatted network traffic log line. -// Pass a function such as func(s string) { Debug("%s", s) } to channel traffic -// output through the leveled logger. +// LogFunc receives a single formatted network traffic log line. type LogFunc func(string) -// NetLogger logs DDP datagrams and Ethernet/LocalTalk frames for debug purposes. -// Logging is disabled (no-op) until SetLogFunc is called with a non-nil function. +// NetLogger logs DDP datagrams and link-layer frames for debug purposes. type NetLogger struct { mu sync.Mutex fn LogFunc @@ -95,9 +166,6 @@ type NetLogger struct { } // SetLogFunc enables network traffic logging and sets the output function. -// Pass nil to disable. 
To enable, pass e.g.: -// -// netlog.SetLogFunc(func(s string) { netlog.Debug("%s", s) }) func (n *NetLogger) SetLogFunc(fn LogFunc) { n.mu.Lock() n.fn = fn @@ -131,7 +199,7 @@ func portName(p ShortStringer) string { return p.ShortString() } -func datagramHeader(d appletalk.Datagram) string { +func datagramHeader(d ddp.Datagram) string { return fmt.Sprintf("%2d %d.%-3d %d.%-3d %3d %3d %d", d.HopCount, d.DestinationNetwork, d.DestinationNode, @@ -156,27 +224,18 @@ func localtalkFrameHeader(frame []byte) string { return fmt.Sprintf("%3d %3d type %02X", frame[0], frame[1], frame[2]) } -// LogDatagramInbound logs an inbound DDP datagram. -func (n *NetLogger) LogDatagramInbound(network uint16, node uint8, d appletalk.Datagram, p ShortStringer) { +func (n *NetLogger) LogDatagramInbound(network uint16, node uint8, d ddp.Datagram, p ShortStringer) { n.emit(fmt.Sprintf("in to %d.%d", network, node), portName(p), datagramHeader(d), d.Data) } - -// LogDatagramUnicast logs an outbound unicast DDP datagram. -func (n *NetLogger) LogDatagramUnicast(network uint16, node uint8, d appletalk.Datagram, p ShortStringer) { +func (n *NetLogger) LogDatagramUnicast(network uint16, node uint8, d ddp.Datagram, p ShortStringer) { n.emit(fmt.Sprintf("out to %d.%d", network, node), portName(p), datagramHeader(d), d.Data) } - -// LogDatagramBroadcast logs an outbound broadcast DDP datagram. -func (n *NetLogger) LogDatagramBroadcast(d appletalk.Datagram, p ShortStringer) { +func (n *NetLogger) LogDatagramBroadcast(d ddp.Datagram, p ShortStringer) { n.emit("out broadcast", portName(p), datagramHeader(d), d.Data) } - -// LogDatagramMulticast logs an outbound multicast DDP datagram. 
-func (n *NetLogger) LogDatagramMulticast(zoneName []byte, d appletalk.Datagram, p ShortStringer) { +func (n *NetLogger) LogDatagramMulticast(zoneName []byte, d ddp.Datagram, p ShortStringer) { n.emit(fmt.Sprintf("out to %s", string(zoneName)), portName(p), datagramHeader(d), d.Data) } - -// LogEthernetFrameInbound logs an inbound Ethernet frame (payload only). func (n *NetLogger) LogEthernetFrameInbound(frame []byte, p ShortStringer) { if len(frame) < 14 { return @@ -188,8 +247,6 @@ func (n *NetLogger) LogEthernetFrameInbound(frame []byte, p ShortStringer) { } n.emit("frame in", portName(p), ethernetFrameHeader(frame), frame[14:end]) } - -// LogEthernetFrameOutbound logs an outbound Ethernet frame (payload only). func (n *NetLogger) LogEthernetFrameOutbound(frame []byte, p ShortStringer) { if len(frame) < 14 { return @@ -201,16 +258,12 @@ func (n *NetLogger) LogEthernetFrameOutbound(frame []byte, p ShortStringer) { } n.emit("frame out", portName(p), ethernetFrameHeader(frame), frame[14:end]) } - -// LogLocaltalkFrameInbound logs an inbound LocalTalk frame (payload only). func (n *NetLogger) LogLocaltalkFrameInbound(frame []byte, p ShortStringer) { if len(frame) < 3 { return } n.emit("frame in", portName(p), localtalkFrameHeader(frame), frame[3:]) } - -// LogLocaltalkFrameOutbound logs an outbound LocalTalk frame (payload only). func (n *NetLogger) LogLocaltalkFrameOutbound(frame []byte, p ShortStringer) { if len(frame) < 3 { return @@ -222,23 +275,18 @@ func (n *NetLogger) LogLocaltalkFrameOutbound(frame []byte, p ShortStringer) { var Default = &NetLogger{} // SetLogFunc configures the Default NetLogger's output function. -// Pass nil to disable. Example to enable at debug level: -// -// netlog.SetLogFunc(func(s string) { netlog.Debug("%s", s) }) func SetLogFunc(fn LogFunc) { Default.SetLogFunc(fn) } -// Package-level convenience wrappers around Default. 
- -func LogDatagramInbound(network uint16, node uint8, d appletalk.Datagram, p ShortStringer) { +func LogDatagramInbound(network uint16, node uint8, d ddp.Datagram, p ShortStringer) { Default.LogDatagramInbound(network, node, d, p) } -func LogDatagramUnicast(network uint16, node uint8, d appletalk.Datagram, p ShortStringer) { +func LogDatagramUnicast(network uint16, node uint8, d ddp.Datagram, p ShortStringer) { Default.LogDatagramUnicast(network, node, d, p) } -func LogDatagramBroadcast(d appletalk.Datagram, p ShortStringer) { +func LogDatagramBroadcast(d ddp.Datagram, p ShortStringer) { Default.LogDatagramBroadcast(d, p) } -func LogDatagramMulticast(zoneName []byte, d appletalk.Datagram, p ShortStringer) { +func LogDatagramMulticast(zoneName []byte, d ddp.Datagram, p ShortStringer) { Default.LogDatagramMulticast(zoneName, d, p) } func LogEthernetFrameInbound(frame []byte, p ShortStringer) { diff --git a/pkg/appledouble/appledouble.go b/pkg/appledouble/appledouble.go new file mode 100644 index 0000000..80cf6ce --- /dev/null +++ b/pkg/appledouble/appledouble.go @@ -0,0 +1,202 @@ +// Package appledouble implements the AppleDouble v2 sidecar file +// format used by macOS, netatalk 4.x, and Samba/CIFS to store +// resource forks and Finder metadata alongside regular files on +// non-HFS filesystems. The sidecar file is named "._" +// and lives in the same directory. +// +// This package is the format only: parse, build, and the constants. +// I/O strategy (where the sidecar lives, how it is opened, how +// metadata is grafted onto a host file) belongs to the caller. +// +// References: +// - AppleDouble / AppleSingle Formats, Apple II File Type Note $E0/0000 +// - netatalk 4.x source (afpd/unix.c, libatalk/adouble/) +// - macOS copyfile(3) / xattr behavior on SMB/CIFS mounts +package appledouble + +import ( + "encoding/binary" + "io" + "path/filepath" +) + +// Magic and version numbers from the AppleDouble spec. 
+const ( + Magic uint32 = 0x00051607 + Version uint32 = 0x00020000 +) + +// Entry IDs from the AppleSingle/AppleDouble spec. +const ( + EntryIDDataFork uint32 = 1 + EntryIDResourceFork uint32 = 2 + EntryIDComment uint32 = 4 + // EntryIDIconBW is the entry ID for a classic 32x32 1-bit + // Macintosh icon (netatalk adouble.h AD_ICON). The payload is + // 128 bytes of bitmap with no mask. + EntryIDIconBW uint32 = 5 + EntryIDFinderInfo uint32 = 9 +) + +// Layout sizes. +const ( + HeaderSize = 26 // magic(4)+version(4)+filler(16)+numEntries(2) + EntrySize = 12 // id(4)+offset(4)+length(4) + + // FinderInfoOffset is the byte offset of the FinderInfo payload + // in a canonical two-entry sidecar (FinderInfo + ResourceFork). + FinderInfoOffset uint32 = HeaderSize + 2*EntrySize // 50 + + // ResourceForkStart is the byte offset of the ResourceFork + // payload in a canonical two-entry sidecar. + ResourceForkStart uint32 = FinderInfoOffset + 32 // 82 + + // ResourceLenFileOffset is the byte offset of the ResourceFork + // entry's "length" field within the file for a canonical + // two-entry sidecar (FinderInfo + ResourceFork). + ResourceLenFileOffset int64 = HeaderSize + EntrySize + 8 // 46 +) + +// SidecarPath returns the modern (._name) sidecar path for filePath. +// Backend code may choose a different layout (e.g. legacy .AppleDouble). +func SidecarPath(filePath string) string { + return filepath.Join(filepath.Dir(filePath), "._"+filepath.Base(filePath)) +} + +// Parsed holds the contents of a decoded AppleDouble sidecar. +type Parsed struct { + FinderInfo [32]byte + Comment []byte + Resource []byte + IconBW []byte + // ResourceOffset is the byte offset within the sidecar at which + // the ResourceFork payload begins. + ResourceOffset int64 + // ResourceLenAt is the byte offset of the ResourceFork entry's + // length field within the sidecar header. Useful when patching + // resource length without rewriting the whole file. 
+ ResourceLenAt int64 + HasFinder bool + HasComment bool + HasResource bool + HasIconBW bool +} + +// Parse decodes an AppleDouble sidecar's bytes. Returns +// io.ErrUnexpectedEOF for a short or malformed buffer. +func Parse(b []byte) (Parsed, error) { + var out Parsed + if len(b) < HeaderSize { + return out, io.ErrUnexpectedEOF + } + if binary.BigEndian.Uint32(b[0:4]) != Magic { + return out, io.ErrUnexpectedEOF + } + numEntries := int(binary.BigEndian.Uint16(b[24:26])) + entriesStart := HeaderSize + entriesLen := numEntries * EntrySize + if len(b) < entriesStart+entriesLen { + return out, io.ErrUnexpectedEOF + } + + for i := 0; i < numEntries; i++ { + off := entriesStart + i*EntrySize + id := binary.BigEndian.Uint32(b[off : off+4]) + eOff := int(binary.BigEndian.Uint32(b[off+4 : off+8])) + eLen := int(binary.BigEndian.Uint32(b[off+8 : off+12])) + if eOff < 0 || eLen < 0 || eOff+eLen > len(b) { + continue + } + switch id { + case EntryIDFinderInfo: + if eLen >= 32 { + copy(out.FinderInfo[:], b[eOff:eOff+32]) + out.HasFinder = true + } + case EntryIDComment: + if eLen > 0 { + out.Comment = append([]byte(nil), b[eOff:eOff+eLen]...) + out.HasComment = true + } + case EntryIDResourceFork: + out.ResourceOffset = int64(eOff) + out.ResourceLenAt = int64(off + 8) + if eLen > 0 { + out.Resource = append([]byte(nil), b[eOff:eOff+eLen]...) + } else { + out.Resource = nil + } + out.HasResource = true + case EntryIDIconBW: + if eLen > 0 { + out.IconBW = append([]byte(nil), b[eOff:eOff+eLen]...) + out.HasIconBW = true + } + case EntryIDDataFork: + // Not used by AFP servers; ignore. + } + } + return out, nil +} + +// Build encodes p into a canonical AppleDouble sidecar. The result +// always contains a FinderInfo entry and a ResourceFork entry; if +// includeCommentEntry is true, a Comment entry of commentLen bytes +// is inserted between them. 
+func Build(p Parsed, includeCommentEntry bool, commentLen uint32) []byte { + numEntries := 2 + if includeCommentEntry { + numEntries = 3 + } + headerLen := HeaderSize + numEntries*EntrySize + + finderOff := uint32(headerLen) + finderLen := uint32(32) + cur := finderOff + finderLen + + var commentOff uint32 + if includeCommentEntry { + commentOff = cur + cur += commentLen + } + + rsrcOff := cur + rsrcLen := uint32(len(p.Resource)) + total := int(rsrcOff + rsrcLen) + if total < int(rsrcOff) { + total = int(rsrcOff) + } + out := make([]byte, total) + + binary.BigEndian.PutUint32(out[0:4], Magic) + binary.BigEndian.PutUint32(out[4:8], Version) + binary.BigEndian.PutUint16(out[24:26], uint16(numEntries)) + + entriesStart := HeaderSize + putEntry := func(i int, id, off, ln uint32) { + base := entriesStart + i*EntrySize + binary.BigEndian.PutUint32(out[base:base+4], id) + binary.BigEndian.PutUint32(out[base+4:base+8], off) + binary.BigEndian.PutUint32(out[base+8:base+12], ln) + } + + putEntry(0, EntryIDFinderInfo, finderOff, finderLen) + if includeCommentEntry { + putEntry(1, EntryIDComment, commentOff, commentLen) + putEntry(2, EntryIDResourceFork, rsrcOff, rsrcLen) + } else { + putEntry(1, EntryIDResourceFork, rsrcOff, rsrcLen) + } + + if p.HasFinder { + copy(out[finderOff:finderOff+finderLen], p.FinderInfo[:]) + } + if includeCommentEntry && commentLen > 0 && len(p.Comment) > 0 { + copy(out[commentOff:commentOff+commentLen], p.Comment[:commentLen]) + } + if rsrcLen > 0 { + copy(out[rsrcOff:rsrcOff+rsrcLen], p.Resource) + } + + return out +} diff --git a/pkg/appledouble/appledouble_test.go b/pkg/appledouble/appledouble_test.go new file mode 100644 index 0000000..3dbb0f1 --- /dev/null +++ b/pkg/appledouble/appledouble_test.go @@ -0,0 +1,65 @@ +package appledouble + +import ( + "bytes" + "testing" +) + +func TestRoundTripFinderAndResource(t *testing.T) { + t.Parallel() + var fi [32]byte + copy(fi[:], "APPLMACS") + in := Parsed{ + FinderInfo: fi, + Resource: 
[]byte("hello-rsrc"), + HasFinder: true, + HasResource: true, + } + raw := Build(in, false, 0) + out, err := Parse(raw) + if err != nil { + t.Fatalf("Parse: %v", err) + } + if !out.HasFinder || out.FinderInfo != fi { + t.Fatalf("FinderInfo round-trip mismatch: got %v", out.FinderInfo) + } + if !out.HasResource || !bytes.Equal(out.Resource, in.Resource) { + t.Fatalf("Resource round-trip mismatch") + } +} + +func TestRoundTripWithComment(t *testing.T) { + t.Parallel() + in := Parsed{ + Comment: []byte("hi"), + Resource: []byte("r"), + HasComment: true, + HasResource: true, + } + raw := Build(in, true, uint32(len(in.Comment))) + out, err := Parse(raw) + if err != nil { + t.Fatalf("Parse: %v", err) + } + if !out.HasComment || !bytes.Equal(out.Comment, in.Comment) { + t.Fatalf("Comment round-trip mismatch: got %q", out.Comment) + } +} + +func TestParseRejectsBadMagic(t *testing.T) { + t.Parallel() + b := make([]byte, HeaderSize) + if _, err := Parse(b); err == nil { + t.Fatal("expected error on bad magic") + } +} + +func TestSidecarPath(t *testing.T) { + t.Parallel() + got := SidecarPath("/Volumes/X/foo.txt") + want := "/Volumes/X/._foo.txt" + // On Windows, filepath.Join uses backslash; compare the basename. + if !bytes.HasSuffix([]byte(got), []byte("._foo.txt")) { + t.Fatalf("SidecarPath = %q, want suffix %q (full want=%q)", got, "._foo.txt", want) + } +} diff --git a/pkg/binutil/binutil.go b/pkg/binutil/binutil.go new file mode 100644 index 0000000..8fa897c --- /dev/null +++ b/pkg/binutil/binutil.go @@ -0,0 +1,164 @@ +// Package binutil provides allocation-free helpers for reading and +// writing fixed-endian wire formats used by AppleTalk and AFP packets. +// +// The package does not define Marshaler/Unmarshaler interfaces itself; +// those live at call sites where the concrete framing is known. 
The +// Wire interface below is the canonical shape: +// +// type Wire interface { +// MarshalWire(b []byte) (n int, err error) +// UnmarshalWire(b []byte) (n int, err error) +// WireSize() int +// } +// +// Implementations should return io.ErrShortBuffer when the buffer is +// too small, and a more specific error when the payload is malformed. +package binutil + +import ( + "encoding/binary" + "errors" + "io" +) + +// ErrShortBuffer is returned when a caller-supplied buffer is too +// small to hold the marshalled form, or too short to decode. +var ErrShortBuffer = io.ErrShortBuffer + +// ErrMalformed indicates that the bytes do not conform to the expected +// wire format (bad length prefix, invalid enum, etc.). +var ErrMalformed = errors.New("binutil: malformed wire data") + +// PutU8 writes v at b[0] and returns the number of bytes written. +// Returns ErrShortBuffer if len(b) < 1. +func PutU8(b []byte, v uint8) (int, error) { + if len(b) < 1 { + return 0, ErrShortBuffer + } + b[0] = v + return 1, nil +} + +// PutU16 writes v big-endian at b[0:2]. +func PutU16(b []byte, v uint16) (int, error) { + if len(b) < 2 { + return 0, ErrShortBuffer + } + binary.BigEndian.PutUint16(b, v) + return 2, nil +} + +// PutU32 writes v big-endian at b[0:4]. +func PutU32(b []byte, v uint32) (int, error) { + if len(b) < 4 { + return 0, ErrShortBuffer + } + binary.BigEndian.PutUint32(b, v) + return 4, nil +} + +// PutU64 writes v big-endian at b[0:8]. +func PutU64(b []byte, v uint64) (int, error) { + if len(b) < 8 { + return 0, ErrShortBuffer + } + binary.BigEndian.PutUint64(b, v) + return 8, nil +} + +// GetU8 reads a uint8 from b[0]. +func GetU8(b []byte) (uint8, int, error) { + if len(b) < 1 { + return 0, 0, ErrShortBuffer + } + return b[0], 1, nil +} + +// GetU16 reads a big-endian uint16 from b[0:2]. 
+func GetU16(b []byte) (uint16, int, error) { + if len(b) < 2 { + return 0, 0, ErrShortBuffer + } + return binary.BigEndian.Uint16(b), 2, nil +} + +// GetU32 reads a big-endian uint32 from b[0:4]. +func GetU32(b []byte) (uint32, int, error) { + if len(b) < 4 { + return 0, 0, ErrShortBuffer + } + return binary.BigEndian.Uint32(b), 4, nil +} + +// GetU64 reads a big-endian uint64 from b[0:8]. +func GetU64(b []byte) (uint64, int, error) { + if len(b) < 8 { + return 0, 0, ErrShortBuffer + } + return binary.BigEndian.Uint64(b), 8, nil +} + +// ByteWriter is the subset of bytes.Buffer / strings.Builder used by +// the Write* helpers below. Any io.Writer would do, but constraining +// to ByteWriter sidesteps the (n, err) plumbing for callers that +// already know writes to a memory buffer cannot fail. +type ByteWriter interface { + Write(p []byte) (int, error) + WriteByte(c byte) error +} + +// WriteU8 appends v to w. Errors from w are ignored: in-memory buffers +// (bytes.Buffer, strings.Builder) cannot fail, and these helpers exist +// to replace allocation-heavy binary.Write calls in hot paths. +func WriteU8(w ByteWriter, v uint8) { + _ = w.WriteByte(v) +} + +// WriteU16 appends a big-endian uint16 to w. +func WriteU16(w ByteWriter, v uint16) { + var b [2]byte + binary.BigEndian.PutUint16(b[:], v) + _, _ = w.Write(b[:]) +} + +// WriteU32 appends a big-endian uint32 to w. +func WriteU32(w ByteWriter, v uint32) { + var b [4]byte + binary.BigEndian.PutUint32(b[:], v) + _, _ = w.Write(b[:]) +} + +// WriteU64 appends a big-endian uint64 to w. +func WriteU64(w ByteWriter, v uint64) { + var b [8]byte + binary.BigEndian.PutUint64(b[:], v) + _, _ = w.Write(b[:]) +} + +// PutPString writes a length-prefixed Pascal string: 1 byte length +// followed by s. Returns ErrMalformed if len(s) > 255. 
+func PutPString(b []byte, s []byte) (int, error) { + if len(s) > 255 { + return 0, ErrMalformed + } + need := 1 + len(s) + if len(b) < need { + return 0, ErrShortBuffer + } + b[0] = uint8(len(s)) + copy(b[1:], s) + return need, nil +} + +// GetPString reads a length-prefixed Pascal string. The returned slice +// aliases b; callers that retain it across further writes must copy. +func GetPString(b []byte) ([]byte, int, error) { + if len(b) < 1 { + return nil, 0, ErrShortBuffer + } + n := int(b[0]) + if len(b) < 1+n { + return nil, 0, ErrShortBuffer + } + return b[1 : 1+n], 1 + n, nil +} diff --git a/pkg/binutil/binutil_test.go b/pkg/binutil/binutil_test.go new file mode 100644 index 0000000..614cf0d --- /dev/null +++ b/pkg/binutil/binutil_test.go @@ -0,0 +1,96 @@ +package binutil + +import ( + "bytes" + "errors" + "testing" +) + +func TestRoundTripFixedWidth(t *testing.T) { + t.Parallel() + b := make([]byte, 15) + off := 0 + for _, step := range []func() (int, error){ + func() (int, error) { return PutU8(b[off:], 0x12) }, + func() (int, error) { return PutU16(b[off:], 0x3456) }, + func() (int, error) { return PutU32(b[off:], 0x789ABCDE) }, + } { + n, err := step() + if err != nil { + t.Fatalf("put: %v", err) + } + off += n + } + if off != 7 { + t.Fatalf("offset = %d, want 7", off) + } + + off = 0 + u8, n, err := GetU8(b[off:]) + if err != nil || u8 != 0x12 { + t.Fatalf("GetU8: %x %v", u8, err) + } + off += n + u16, n, err := GetU16(b[off:]) + if err != nil || u16 != 0x3456 { + t.Fatalf("GetU16: %x %v", u16, err) + } + off += n + u32, _, err := GetU32(b[off:]) + if err != nil || u32 != 0x789ABCDE { + t.Fatalf("GetU32: %x %v", u32, err) + } +} + +func TestPStringRoundTrip(t *testing.T) { + t.Parallel() + b := make([]byte, 32) + in := []byte("Volume") + n, err := PutPString(b, in) + if err != nil { + t.Fatal(err) + } + if n != 1+len(in) { + t.Fatalf("n = %d, want %d", n, 1+len(in)) + } + + out, n2, err := GetPString(b) + if err != nil { + t.Fatal(err) + } + if n != n2 
{ + t.Fatalf("asymmetric n: put=%d get=%d", n, n2) + } + if !bytes.Equal(in, out) { + t.Fatalf("got %q, want %q", out, in) + } +} + +func TestShortBuffer(t *testing.T) { + t.Parallel() + if _, err := PutU32(make([]byte, 3), 0); !errors.Is(err, ErrShortBuffer) { + t.Fatalf("expected ErrShortBuffer, got %v", err) + } + if _, _, err := GetU16(make([]byte, 1)); !errors.Is(err, ErrShortBuffer) { + t.Fatalf("expected ErrShortBuffer, got %v", err) + } + if _, err := PutPString(make([]byte, 2), []byte("xxx")); !errors.Is(err, ErrShortBuffer) { + t.Fatalf("expected ErrShortBuffer, got %v", err) + } +} + +func TestPStringTooLong(t *testing.T) { + t.Parallel() + long := make([]byte, 256) + if _, err := PutPString(make([]byte, 300), long); !errors.Is(err, ErrMalformed) { + t.Fatalf("expected ErrMalformed, got %v", err) + } +} + +func BenchmarkPutU32(b *testing.B) { + buf := make([]byte, 4) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = PutU32(buf, uint32(i)) + } +} diff --git a/pkg/cnid/cnid.go b/pkg/cnid/cnid.go new file mode 100644 index 0000000..51a054d --- /dev/null +++ b/pkg/cnid/cnid.go @@ -0,0 +1,30 @@ +// Package cnid tracks the mapping between AFP Catalog Node IDs and +// current filesystem paths for a single volume. The package is AFP- +// agnostic — future services (macgarden, others) can reuse the Store +// interface and its in-memory and SQLite implementations without +// pulling in anything from service/afp. +package cnid + +const ( + // Invalid signals an error or "no CNID" sentinel. + Invalid uint32 = 0 + // ParentOfRoot is the synthetic parent of the root directory. + ParentOfRoot uint32 = 1 + // Root identifies a volume's root directory. + Root uint32 = 2 + // firstDynamic is the first CNID assignable to non-root objects. + firstDynamic uint32 = 3 +) + +// Store tracks CNID <-> path bindings. Implementations must be safe for +// concurrent use. 
Callers treat paths as opaque strings but are free to +// expect that path.Clean-equivalent normalisation happens internally. +type Store interface { + RootID() uint32 + Path(cnid uint32) (string, bool) + CNID(path string) (uint32, bool) + Ensure(path string) uint32 + EnsureReserved(path string, cnid uint32) uint32 + Rebind(oldPath, newPath string) + Remove(path string) +} diff --git a/pkg/cnid/memory.go b/pkg/cnid/memory.go new file mode 100644 index 0000000..ee2f1e0 --- /dev/null +++ b/pkg/cnid/memory.go @@ -0,0 +1,129 @@ +package cnid + +import ( + "path/filepath" + "strings" + "sync" +) + +// MemoryStore keeps CNIDs in memory for the lifetime of the process. It +// is the default backend when persistence is not required (tests, +// minimal builds, or callers that explicitly do not want a SQLite file). +type MemoryStore struct { + mu sync.RWMutex + cnidToPath map[uint32]string + pathToCNID map[string]uint32 + nextCNID uint32 +} + +func NewMemoryStore() *MemoryStore { + return &MemoryStore{ + cnidToPath: make(map[uint32]string), + pathToCNID: make(map[string]uint32), + nextCNID: firstDynamic, + } +} + +func (s *MemoryStore) RootID() uint32 { return Root } + +func (s *MemoryStore) Path(cnid uint32) (string, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + path, ok := s.cnidToPath[cnid] + return path, ok +} + +func (s *MemoryStore) CNID(path string) (uint32, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + cnid, ok := s.pathToCNID[filepath.Clean(path)] + return cnid, ok +} + +func (s *MemoryStore) Ensure(path string) uint32 { + path = filepath.Clean(path) + + s.mu.Lock() + defer s.mu.Unlock() + + if cnid, ok := s.pathToCNID[path]; ok { + return cnid + } + + cnid := s.nextAvailableCNIDLocked() + s.cnidToPath[cnid] = path + s.pathToCNID[path] = cnid + return cnid +} + +func (s *MemoryStore) EnsureReserved(path string, cnid uint32) uint32 { + path = filepath.Clean(path) + + s.mu.Lock() + defer s.mu.Unlock() + + if existing, ok := s.pathToCNID[path]; ok { + return 
existing + } + if existingPath, ok := s.cnidToPath[cnid]; ok && existingPath != path { + delete(s.pathToCNID, existingPath) + } + + s.cnidToPath[cnid] = path + s.pathToCNID[path] = cnid + if cnid >= s.nextCNID { + s.nextCNID = cnid + 1 + if s.nextCNID < firstDynamic { + s.nextCNID = firstDynamic + } + } + return cnid +} + +func (s *MemoryStore) Rebind(oldPath, newPath string) { + oldPath = filepath.Clean(oldPath) + newPath = filepath.Clean(newPath) + prefix := oldPath + string(filepath.Separator) + + s.mu.Lock() + defer s.mu.Unlock() + + for cnid, path := range s.cnidToPath { + if path != oldPath && !strings.HasPrefix(path, prefix) { + continue + } + suffix := strings.TrimPrefix(path, oldPath) + mapped := filepath.Clean(newPath + suffix) + delete(s.pathToCNID, path) + s.cnidToPath[cnid] = mapped + s.pathToCNID[mapped] = cnid + } +} + +func (s *MemoryStore) Remove(path string) { + path = filepath.Clean(path) + prefix := path + string(filepath.Separator) + + s.mu.Lock() + defer s.mu.Unlock() + + for cnid, current := range s.cnidToPath { + if current == path || strings.HasPrefix(current, prefix) { + delete(s.cnidToPath, cnid) + delete(s.pathToCNID, current) + } + } +} + +func (s *MemoryStore) nextAvailableCNIDLocked() uint32 { + for { + cnid := s.nextCNID + s.nextCNID++ + if cnid < firstDynamic { + continue + } + if _, exists := s.cnidToPath[cnid]; !exists { + return cnid + } + } +} diff --git a/pkg/cnid/memory_test.go b/pkg/cnid/memory_test.go new file mode 100644 index 0000000..01dfda1 --- /dev/null +++ b/pkg/cnid/memory_test.go @@ -0,0 +1,86 @@ +package cnid + +import ( + "path/filepath" + "testing" +) + +func TestMemoryStoreEnsureAndLookup(t *testing.T) { + t.Parallel() + s := NewMemoryStore() + if s.RootID() != Root { + t.Fatalf("RootID = %d, want %d", s.RootID(), Root) + } + + a := s.Ensure("dir/foo") + if a < firstDynamic { + t.Fatalf("Ensure returned reserved CNID %d", a) + } + if got := s.Ensure("dir/foo"); got != a { + t.Fatalf("Ensure not idempotent: %d vs 
%d", got, a) + } + if got, ok := s.CNID("dir/foo"); !ok || got != a { + t.Fatalf("CNID lookup: got=%d ok=%v, want %d", got, ok, a) + } + want := filepath.Clean("dir/foo") + if got, ok := s.Path(a); !ok || got != want { + t.Fatalf("Path lookup: got=%q want=%q ok=%v", got, want, ok) + } +} + +func TestMemoryStoreRebindPrefix(t *testing.T) { + t.Parallel() + s := NewMemoryStore() + root := s.Ensure("a") + child := s.Ensure("a/b/c") + + s.Rebind("a", "x") + + if got, ok := s.Path(root); !ok || got != "x" { + t.Fatalf("root path after rebind: got=%q ok=%v", got, ok) + } + wantChild := filepath.Clean("x/b/c") + if got, ok := s.Path(child); !ok || got != wantChild { + t.Fatalf("child path after rebind: got=%q want=%q ok=%v", got, wantChild, ok) + } + if _, ok := s.CNID("a/b/c"); ok { + t.Fatal("old path still resolvable after rebind") + } +} + +func TestMemoryStoreRemoveSubtree(t *testing.T) { + t.Parallel() + s := NewMemoryStore() + keep := s.Ensure("keep") + s.Ensure("drop") + s.Ensure("drop/child") + + s.Remove("drop") + + if _, ok := s.CNID("drop"); ok { + t.Error("drop not removed") + } + if _, ok := s.CNID("drop/child"); ok { + t.Error("drop/child not removed") + } + if _, ok := s.Path(keep); !ok { + t.Error("keep was incorrectly removed") + } +} + +func TestMemoryStoreEnsureReserved(t *testing.T) { + t.Parallel() + s := NewMemoryStore() + got := s.EnsureReserved("foo", 100) + if got != 100 { + t.Fatalf("EnsureReserved = %d, want 100", got) + } + if path, ok := s.Path(100); !ok || path != "foo" { + t.Fatalf("Path(100) = %q %v", path, ok) + } + // Subsequent Ensure should skip 100. 
+ next := s.Ensure("bar") + if next == 100 { + t.Fatal("Ensure collided with reserved CNID") + } +} diff --git a/pkg/cnid/sqlite.go b/pkg/cnid/sqlite.go new file mode 100644 index 0000000..30f9646 --- /dev/null +++ b/pkg/cnid/sqlite.go @@ -0,0 +1,285 @@ +//go:build sqlite_cnid || all + +package cnid + +import ( + "database/sql" + "fmt" + "log/slog" + "os" + "path/filepath" + "strings" + "sync" + + _ "modernc.org/sqlite" +) + +// SQLiteFilename is the standard CNID database filename dropped at the +// root of a volume. +const SQLiteFilename = "_.afp.db" + +// SQLitePath returns the canonical location of the CNID database file +// for a volume whose filesystem root is volumeRootPath. +func SQLitePath(volumeRootPath string) string { + return filepath.Join(filepath.Clean(volumeRootPath), SQLiteFilename) +} + +// OpenSQLiteDB opens (creating if necessary) the CNID SQLite database +// for a volume at volumeRootPath. It is exported so callers that want +// to share a *sql.DB between CNID and other per-volume metadata (e.g. +// Desktop DB) can do so. +func OpenSQLiteDB(volumeRootPath string) (*sql.DB, error) { + dbPath := SQLitePath(volumeRootPath) + if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil { + return nil, fmt.Errorf("create sqlite dir for %q: %w", dbPath, err) + } + db, err := sql.Open("sqlite", dbPath) + if err != nil { + return nil, fmt.Errorf("open sqlite db %q: %w", dbPath, err) + } + // Single-writer access pattern keeps behaviour deterministic under + // concurrent AFP operations and avoids Windows lock contention. 
+ db.SetMaxOpenConns(1) + db.SetMaxIdleConns(0) + + for _, stmt := range []string{ + "PRAGMA journal_mode=WAL", + "PRAGMA synchronous=NORMAL", + "PRAGMA foreign_keys=ON", + "PRAGMA busy_timeout=5000", + } { + if _, execErr := db.Exec(stmt); execErr != nil { + db.Close() + return nil, fmt.Errorf("sqlite pragma %q on %q: %w", stmt, dbPath, execErr) + } + } + + slog.Default().Info("opened cnid sqlite database", "path", dbPath, "source", "CNID") + return db, nil +} + +// SQLiteStore persists CNIDs in a per-volume SQLite database. +type SQLiteStore struct { + mu sync.Mutex + db *sql.DB +} + +// NewSQLiteStore opens (or creates) the CNID database under volumeRootPath. +func NewSQLiteStore(volumeRootPath string) (*SQLiteStore, error) { + db, err := OpenSQLiteDB(volumeRootPath) + if err != nil { + return nil, err + } + store := &SQLiteStore{db: db} + if err := store.initSchema(); err != nil { + db.Close() + return nil, err + } + return store, nil +} + +func (s *SQLiteStore) initSchema() error { + _, err := s.db.Exec(` + CREATE TABLE IF NOT EXISTS cnid_paths ( + cnid INTEGER PRIMARY KEY, + path TEXT NOT NULL UNIQUE + ); + CREATE INDEX IF NOT EXISTS idx_cnid_paths_path ON cnid_paths(path); + `) + return err +} + +func (s *SQLiteStore) RootID() uint32 { return Root } + +func (s *SQLiteStore) Path(cnid uint32) (string, bool) { + var path string + err := s.db.QueryRow("SELECT path FROM cnid_paths WHERE cnid = ?", cnid).Scan(&path) + if err != nil { + return "", false + } + return path, true +} + +func (s *SQLiteStore) CNID(path string) (uint32, bool) { + path = filepath.Clean(path) + var cnid uint32 + err := s.db.QueryRow("SELECT cnid FROM cnid_paths WHERE path = ?", path).Scan(&cnid) + if err != nil { + return 0, false + } + return cnid, true +} + +func (s *SQLiteStore) Ensure(path string) uint32 { + path = filepath.Clean(path) + + s.mu.Lock() + defer s.mu.Unlock() + + tx, err := s.db.Begin() + if err != nil { + return Invalid + } + defer tx.Rollback() + + if cnid, ok := 
selectCNIDByPathTx(tx, path); ok { + _ = tx.Commit() + return cnid + } + + cnid, err := nextAvailableCNIDTx(tx) + if err != nil { + return Invalid + } + if _, err := tx.Exec("INSERT INTO cnid_paths(cnid, path) VALUES(?, ?)", cnid, path); err != nil { + return Invalid + } + if err := tx.Commit(); err != nil { + return Invalid + } + return cnid +} + +func (s *SQLiteStore) EnsureReserved(path string, cnid uint32) uint32 { + path = filepath.Clean(path) + + s.mu.Lock() + defer s.mu.Unlock() + + tx, err := s.db.Begin() + if err != nil { + return Invalid + } + defer tx.Rollback() + + if existing, ok := selectCNIDByPathTx(tx, path); ok { + _ = tx.Commit() + return existing + } + + if existingPath, ok := selectPathByCNIDTx(tx, cnid); ok && existingPath != path { + if _, err := tx.Exec("DELETE FROM cnid_paths WHERE cnid = ?", cnid); err != nil { + return Invalid + } + } + + if _, err := tx.Exec("INSERT INTO cnid_paths(cnid, path) VALUES(?, ?)", cnid, path); err != nil { + return Invalid + } + if err := tx.Commit(); err != nil { + return Invalid + } + return cnid +} + +func (s *SQLiteStore) Rebind(oldPath, newPath string) { + oldPath = filepath.Clean(oldPath) + newPath = filepath.Clean(newPath) + prefix := oldPath + string(filepath.Separator) + + s.mu.Lock() + defer s.mu.Unlock() + + tx, err := s.db.Begin() + if err != nil { + return + } + defer tx.Rollback() + + rows, err := tx.Query("SELECT cnid, path FROM cnid_paths") + if err != nil { + return + } + defer rows.Close() + + type row struct { + cnid uint32 + path string + } + var updates []row + for rows.Next() { + var r row + if err := rows.Scan(&r.cnid, &r.path); err != nil { + return + } + if r.path != oldPath && !strings.HasPrefix(r.path, prefix) { + continue + } + updates = append(updates, r) + } + for _, r := range updates { + suffix := strings.TrimPrefix(r.path, oldPath) + mapped := filepath.Clean(newPath + suffix) + if _, err := tx.Exec("UPDATE cnid_paths SET path = ? 
WHERE cnid = ?", mapped, r.cnid); err != nil { + return + } + } + _ = tx.Commit() +} + +func (s *SQLiteStore) Remove(path string) { + path = filepath.Clean(path) + prefix := path + string(filepath.Separator) + + s.mu.Lock() + defer s.mu.Unlock() + + tx, err := s.db.Begin() + if err != nil { + return + } + defer tx.Rollback() + + rows, err := tx.Query("SELECT cnid, path FROM cnid_paths") + if err != nil { + return + } + defer rows.Close() + + var toDelete []uint32 + for rows.Next() { + var cnid uint32 + var current string + if err := rows.Scan(&cnid, ¤t); err != nil { + return + } + if current == path || strings.HasPrefix(current, prefix) { + toDelete = append(toDelete, cnid) + } + } + for _, cnid := range toDelete { + if _, err := tx.Exec("DELETE FROM cnid_paths WHERE cnid = ?", cnid); err != nil { + return + } + } + _ = tx.Commit() +} + +func selectCNIDByPathTx(tx *sql.Tx, path string) (uint32, bool) { + var cnid uint32 + err := tx.QueryRow("SELECT cnid FROM cnid_paths WHERE path = ?", path).Scan(&cnid) + if err != nil { + return 0, false + } + return cnid, true +} + +func selectPathByCNIDTx(tx *sql.Tx, cnid uint32) (string, bool) { + var path string + err := tx.QueryRow("SELECT path FROM cnid_paths WHERE cnid = ?", cnid).Scan(&path) + if err != nil { + return "", false + } + return path, true +} + +func nextAvailableCNIDTx(tx *sql.Tx) (uint32, error) { + var maxCNID uint32 + if err := tx.QueryRow("SELECT COALESCE(MAX(cnid), 0) FROM cnid_paths").Scan(&maxCNID); err != nil { + return 0, err + } + if maxCNID < firstDynamic-1 { + return firstDynamic, nil + } + return maxCNID + 1, nil +} diff --git a/pkg/cnid/sqlite_stub.go b/pkg/cnid/sqlite_stub.go new file mode 100644 index 0000000..71c9560 --- /dev/null +++ b/pkg/cnid/sqlite_stub.go @@ -0,0 +1,49 @@ +//go:build !sqlite_cnid && !all + +package cnid + +import ( + "database/sql" + "errors" + "path/filepath" +) + +// SQLiteFilename is the standard CNID database filename dropped at the +// root of a volume. 
The constant remains exported in stub builds so +// callers can detect/skip the sidecar regardless of which CNID backend +// is compiled in. +const SQLiteFilename = "_.afp.db" + +// ErrSQLiteDisabled is returned by SQLite-backed constructors when the +// binary is built without the "sqlite_cnid" build tag. Callers should +// fall back to MemoryStore. +var ErrSQLiteDisabled = errors.New("sqlite CNID backend not built; rebuild with -tags sqlite_cnid") + +// SQLitePath returns the canonical CNID database location even in stub +// builds so callers that filter the sidecar by name keep working. +func SQLitePath(volumeRootPath string) string { + return filepath.Join(filepath.Clean(volumeRootPath), SQLiteFilename) +} + +// SQLiteStore is a stub type so external alias declarations +// (e.g. service/afp.SQLiteCNIDStore) keep compiling under !sqlite_cnid. +// The real implementation lives in sqlite.go behind //go:build sqlite_cnid. +// +// All methods are no-ops; the stub is only ever returned alongside +// ErrSQLiteDisabled, so callers fall back to MemoryStore before any +// method is invoked. +type SQLiteStore struct{} + +func (*SQLiteStore) RootID() uint32 { return Root } +func (*SQLiteStore) Path(_ uint32) (string, bool) { return "", false } +func (*SQLiteStore) CNID(_ string) (uint32, bool) { return 0, false } +func (*SQLiteStore) Ensure(_ string) uint32 { return 0 } +func (*SQLiteStore) EnsureReserved(_ string, cnid uint32) uint32 { return cnid } +func (*SQLiteStore) Rebind(_ string, _ string) {} +func (*SQLiteStore) Remove(_ string) {} + +// OpenSQLiteDB always returns ErrSQLiteDisabled in stub builds. +func OpenSQLiteDB(_ string) (*sql.DB, error) { return nil, ErrSQLiteDisabled } + +// NewSQLiteStore always returns ErrSQLiteDisabled in stub builds. 
+func NewSQLiteStore(_ string) (*SQLiteStore, error) { return nil, ErrSQLiteDisabled } diff --git a/pkg/encoding/doc.go b/pkg/encoding/doc.go new file mode 100644 index 0000000..94f5a0e --- /dev/null +++ b/pkg/encoding/doc.go @@ -0,0 +1,5 @@ +// Package encoding provides AppleTalk-adjacent character-set codecs — +// primarily MacRoman, for conversion between classic Mac OS text and +// modern UTF-8. Lives under pkg/ because it has no AppleTalk-specific +// state and is reusable outside this project. +package encoding diff --git a/encoding/macroman.go b/pkg/encoding/macroman.go similarity index 100% rename from encoding/macroman.go rename to pkg/encoding/macroman.go diff --git a/appletalk/macroman_test.go b/pkg/encoding/macroman_test.go similarity index 93% rename from appletalk/macroman_test.go rename to pkg/encoding/macroman_test.go index 0edf4ef..4025543 100644 --- a/appletalk/macroman_test.go +++ b/pkg/encoding/macroman_test.go @@ -1,4 +1,4 @@ -package appletalk +package encoding import ( "bytes" @@ -6,6 +6,7 @@ import ( ) func TestMacRomanToUpper(t *testing.T) { + t.Parallel() // Re-implement the old logic for a correctness check atalkLower := []byte("abcdefghijklmnopqrstuvwxyz\x88\x8A\x8B\x8C\x8D\x8E\x96\x9A\x9B\x9F\xBE\xBF\xCF") atalkUpper := []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ\xCB\x80\xCC\x81\x82\x83\x84\x85\xCD\x86\xAE\xAF\xCE") @@ -23,8 +24,7 @@ func TestMacRomanToUpper(t *testing.T) { return out } - // Test all 256 bytes - for i := 0; i < 256; i++ { + for i := range 256 { input := []byte{byte(i)} expected := oldUCase(input) actual := MacRomanToUpper(input) @@ -33,7 +33,6 @@ func TestMacRomanToUpper(t *testing.T) { } } - // Test a string input := []byte("Hello, AppleTalk Zone\x88\x8A!") expected := oldUCase(input) actual := MacRomanToUpper(input) @@ -43,6 +42,7 @@ func TestMacRomanToUpper(t *testing.T) { } func TestMacRomanToUTF8(t *testing.T) { + t.Parallel() input := []byte{'M', 'a', 'c', ' ', '\x80', '\x81', '\x82'} expected := "Mac ÄÅÇ" actual := 
MacRomanToUTF8(input) @@ -52,6 +52,7 @@ func TestMacRomanToUTF8(t *testing.T) { } func TestUTF8ToMacRoman(t *testing.T) { + t.Parallel() input := "Mac ÄÅÇ" expected := []byte{'M', 'a', 'c', ' ', '\x80', '\x81', '\x82'} actual := UTF8ToMacRoman(input) @@ -59,7 +60,6 @@ func TestUTF8ToMacRoman(t *testing.T) { t.Errorf("UTF8ToMacRoman failed: expected %x, got %x", expected, actual) } - // Test fallback for unmappable input2 := "Mac 🤔" expected2 := []byte{'M', 'a', 'c', ' ', '?'} actual2 := UTF8ToMacRoman(input2) diff --git a/pkg/hwaddr/hwaddr.go b/pkg/hwaddr/hwaddr.go new file mode 100644 index 0000000..ce844c6 --- /dev/null +++ b/pkg/hwaddr/hwaddr.go @@ -0,0 +1,194 @@ +// Package hwaddr provides unified hardware-address types covering Ethernet +// (EUI-48), LocalTalk (8-bit LLAP node ID), and AppleTalk (24-bit DDP +// address), plus parsing, formatting, generation, and conversion between +// them. It replaces ad-hoc helpers previously scattered across cmd/omnitalk, +// port/ethertalk, port/localtalk, and service/macip. +package hwaddr + +import ( + "encoding/hex" + "fmt" + "math/rand" + "net" + "strings" +) + +// Ethernet is a 48-bit EUI-48 hardware address. +type Ethernet [6]byte + +// LocalTalk is an 8-bit LLAP node identifier. Values 0 and 0xFF are reserved +// (invalid / broadcast). Nodes 1–127 are the "user" range; 128–254 are the +// "server" range that servers prefer when self-assigning. +type LocalTalk uint8 + +// AppleTalk is a 24-bit DDP address (16-bit network + 8-bit node). +type AppleTalk struct { + Network uint16 + Node uint8 +} + +// AppleOUI is Apple's registered IEEE OUI; used as the default prefix when +// synthesising Ethernet addresses from AppleTalk addresses. +var AppleOUI = [3]byte{0x00, 0x00, 0x07} + +// MacIPOUI is the locally administered prefix historically used by OmniTalk's +// MacIP gateway to fabricate per-node MACs for DHCP. Bit 1 of the first octet +// is set, marking the address as locally administered. 
+var MacIPOUI = [3]byte{0x02, 0x00, 0x00} + +// ParseEthernet accepts 12 hex digits with optional `:` or `-` separators. +func ParseEthernet(s string) (Ethernet, error) { + var out Ethernet + normalized := strings.ReplaceAll(strings.ReplaceAll(strings.TrimSpace(s), ":", ""), "-", "") + if len(normalized) != 12 { + return out, fmt.Errorf("ethernet address: want 12 hex digits, got %d", len(normalized)) + } + b, err := hex.DecodeString(normalized) + if err != nil { + return out, fmt.Errorf("ethernet address: %w", err) + } + copy(out[:], b) + return out, nil +} + +// String renders as colon-separated lowercase hex (`de:ad:be:ef:ca:fe`). +func (e Ethernet) String() string { + return net.HardwareAddr(e[:]).String() +} + +// Bytes returns a copy of the raw 6-byte form. +func (e Ethernet) Bytes() []byte { + out := make([]byte, 6) + copy(out, e[:]) + return out +} + +// HardwareAddr adapts to net.HardwareAddr for stdlib APIs. +func (e Ethernet) HardwareAddr() net.HardwareAddr { + return net.HardwareAddr(e.Bytes()) +} + +// EthernetFromBytes constructs an Ethernet from a 6-byte slice. +func EthernetFromBytes(b []byte) (Ethernet, error) { + var out Ethernet + if len(b) != 6 { + return out, fmt.Errorf("ethernet address: want 6 bytes, got %d", len(b)) + } + copy(out[:], b) + return out, nil +} + +// ParseLocalTalk parses `0x`, `0`, or decimal forms. +func ParseLocalTalk(s string) (LocalTalk, error) { + s = strings.TrimSpace(s) + var n uint64 + var err error + switch { + case strings.HasPrefix(s, "0x"), strings.HasPrefix(s, "0X"): + _, err = fmt.Sscanf(s[2:], "%x", &n) + default: + _, err = fmt.Sscanf(s, "%d", &n) + } + if err != nil { + return 0, fmt.Errorf("localtalk node: %w", err) + } + if n > 0xFF { + return 0, fmt.Errorf("localtalk node: %d out of range", n) + } + return LocalTalk(n), nil +} + +// String renders as `0x`. 
+func (n LocalTalk) String() string { return fmt.Sprintf("0x%02X", uint8(n)) } + +// Valid reports whether n is a usable unicast node id (not 0, not 0xFF). +func (n LocalTalk) Valid() bool { return n != 0 && n != 0xFF } + +// IsServerRange reports whether n is in the server-preferred range (128–254). +func (n LocalTalk) IsServerRange() bool { return n >= 128 && n <= 254 } + +// GenerateEthernet fabricates an Ethernet address by filling the last three +// octets with random bytes from r (using math/rand.Read if r is nil). +func GenerateEthernet(oui [3]byte, r *rand.Rand) Ethernet { + var e Ethernet + e[0], e[1], e[2] = oui[0], oui[1], oui[2] + var tail [3]byte + if r == nil { + r = rand.New(rand.NewSource(rand.Int63())) + } + for i := range tail { + tail[i] = byte(r.Intn(256)) + } + e[3], e[4], e[5] = tail[0], tail[1], tail[2] + return e +} + +// GenerateLocalTalk returns a shuffled candidate list of LocalTalk node ids +// suitable for self-assignment. If preferred is non-empty its entries are +// tried first in the order given; the remaining valid node ids follow in +// shuffled order. If r is nil, math/rand's default source is used. +// +// Server callers should pass preferred ids in the 128–254 range so they +// claim server-range addresses before falling back to client-range ones. +func GenerateLocalTalk(preferred []LocalTalk, r *rand.Rand) []LocalTalk { + seen := make(map[LocalTalk]bool, 254) + out := make([]LocalTalk, 0, 254) + for _, p := range preferred { + if !p.Valid() || seen[p] { + continue + } + seen[p] = true + out = append(out, p) + } + rest := make([]LocalTalk, 0, 254) + for i := 1; i <= 254; i++ { + id := LocalTalk(i) + if seen[id] { + continue + } + rest = append(rest, id) + } + shuffle := rand.Shuffle + if r != nil { + shuffle = r.Shuffle + } + shuffle(len(rest), func(i, j int) { rest[i], rest[j] = rest[j], rest[i] }) + return append(out, rest...) 
+} + +// EthernetFromAppleTalk synthesises an Ethernet address encoding the given +// AppleTalk address in the low 24 bits. The conversion is deterministic and +// reversible via AppleTalkFromEthernet using the same oui. +// +// Layout: [oui[0] oui[1] oui[2] netHi netLo node]. +func EthernetFromAppleTalk(oui [3]byte, a AppleTalk) Ethernet { + var e Ethernet + e[0], e[1], e[2] = oui[0], oui[1], oui[2] + e[3] = byte(a.Network >> 8) + e[4] = byte(a.Network) + e[5] = a.Node + return e +} + +// AppleTalkFromEthernet recovers the AppleTalk address previously encoded +// by EthernetFromAppleTalk. Returns ok=false if the OUI prefix does not +// match. +func AppleTalkFromEthernet(oui [3]byte, e Ethernet) (AppleTalk, bool) { + if e[0] != oui[0] || e[1] != oui[1] || e[2] != oui[2] { + return AppleTalk{}, false + } + return AppleTalk{ + Network: uint16(e[3])<<8 | uint16(e[4]), + Node: e[5], + }, true +} + +// MacIPEthernetFromAppleTalk is the MacIP-gateway-specific address +// synthesis used for DHCP client identity on behalf of AppleTalk nodes. +// Layout: 0x02 (locally administered) | netHi | netLo | node | 'M' | 'I'. +// The suffix "MI" distinguishes these addresses from generic AARP-style +// syntheses and preserves wire-level compatibility with existing DHCP +// leases issued against OmniTalk MacIP. 
+func MacIPEthernetFromAppleTalk(a AppleTalk) Ethernet { + return Ethernet{0x02, byte(a.Network >> 8), byte(a.Network), a.Node, 'M', 'I'} +} diff --git a/pkg/hwaddr/hwaddr_test.go b/pkg/hwaddr/hwaddr_test.go new file mode 100644 index 0000000..d25a522 --- /dev/null +++ b/pkg/hwaddr/hwaddr_test.go @@ -0,0 +1,104 @@ +package hwaddr + +import ( + "testing" +) + +func TestParseEthernetRoundTrip(t *testing.T) { + t.Parallel() + cases := []string{"de:ad:be:ef:ca:fe", "DE-AD-BE-EF-CA-FE", "deadbeefcafe"} + want := Ethernet{0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe} + for _, s := range cases { + got, err := ParseEthernet(s) + if err != nil { + t.Fatalf("ParseEthernet(%q): %v", s, err) + } + if got != want { + t.Errorf("ParseEthernet(%q) = %v, want %v", s, got, want) + } + } + if got := want.String(); got != "de:ad:be:ef:ca:fe" { + t.Errorf("Ethernet.String = %q", got) + } +} + +func TestParseEthernetErrors(t *testing.T) { + t.Parallel() + for _, s := range []string{"", "zz:zz:zz:zz:zz:zz", "de:ad:be:ef"} { + if _, err := ParseEthernet(s); err == nil { + t.Errorf("ParseEthernet(%q) expected error", s) + } + } +} + +func TestLocalTalkParse(t *testing.T) { + t.Parallel() + cases := map[string]LocalTalk{"0xFE": 0xFE, "0x01": 1, "128": 128, "254": 254} + for in, want := range cases { + got, err := ParseLocalTalk(in) + if err != nil { + t.Fatalf("ParseLocalTalk(%q): %v", in, err) + } + if got != want { + t.Errorf("ParseLocalTalk(%q) = %v, want %v", in, got, want) + } + } +} + +func TestLocalTalkValidity(t *testing.T) { + t.Parallel() + if LocalTalk(0).Valid() || LocalTalk(0xFF).Valid() { + t.Error("reserved ids should be invalid") + } + if !LocalTalk(1).Valid() || !LocalTalk(200).Valid() { + t.Error("unicast ids should be valid") + } + if !LocalTalk(200).IsServerRange() || LocalTalk(50).IsServerRange() { + t.Error("IsServerRange boundary wrong") + } +} + +func TestAppleTalkEthernetRoundTrip(t *testing.T) { + t.Parallel() + oui := MacIPOUI + for n := 0; n < 0x10000; n += 257 { + for _, 
node := range []uint8{1, 42, 0x80, 0xFD, 0xFE} { + a := AppleTalk{Network: uint16(n), Node: node} + e := EthernetFromAppleTalk(oui, a) + got, ok := AppleTalkFromEthernet(oui, e) + if !ok || got != a { + t.Fatalf("round-trip failed for %+v: got %+v ok=%v", a, got, ok) + } + } + } +} + +func TestAppleTalkFromEthernetRejectsWrongOUI(t *testing.T) { + t.Parallel() + e := EthernetFromAppleTalk(MacIPOUI, AppleTalk{Network: 1, Node: 2}) + if _, ok := AppleTalkFromEthernet(AppleOUI, e); ok { + t.Error("expected mismatched OUI to return ok=false") + } +} + +func TestGenerateLocalTalkPreferredFirst(t *testing.T) { + t.Parallel() + preferred := []LocalTalk{200, 201, 0xFF, 200} // 0xFF invalid, dup ignored + out := GenerateLocalTalk(preferred, nil) + if out[0] != 200 || out[1] != 201 { + t.Errorf("expected preferred ids first, got %v", out[:2]) + } + if len(out) != 254 { + t.Errorf("expected 254 candidate ids, got %d", len(out)) + } + seen := map[LocalTalk]bool{} + for _, id := range out { + if !id.Valid() { + t.Errorf("generated id %v is invalid", id) + } + if seen[id] { + t.Errorf("duplicate id %v", id) + } + seen[id] = true + } +} diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go new file mode 100644 index 0000000..9bcd0ab --- /dev/null +++ b/pkg/logging/logging.go @@ -0,0 +1,313 @@ +// Package logging is a thin wrapper around log/slog providing: +// - dual-mode output: a human-readable console handler and a structured +// JSON handler, both of which can be active simultaneously; +// - per-component source tagging ([AFP], [ASP], [EtherTalk], ...) rendered +// as a prefix in console output and emitted as "source":"AFP" in JSON; +// - context-carried loggers so correlation fields (session, volume) flow +// through call chains without threading a logger parameter everywhere. +// +// Construct one root logger in main via New(root, opts), then derive +// per-service loggers with logger.With("source", "AFP") or Child(parent, +// "AFP"). 
Handler wiring is owned here; callers should never touch +// slog.NewJSONHandler/slog.NewTextHandler directly. +package logging + +import ( + "context" + "fmt" + "io" + "log/slog" + "os" + "strings" + "sync" +) + +// Format selects one of the handlers emitted by New. +type Format int + +const ( + // FormatConsole is a human-readable single-line format with a [SOURCE] + // prefix. Intended for TTY/stderr. + FormatConsole Format = iota + // FormatJSON is newline-delimited slog JSON. Intended for log pipelines. + FormatJSON +) + +// Sink describes one output. Multiple sinks may be combined via New. +type Sink struct { + Writer io.Writer + Format Format + Level slog.Level +} + +// Options configures New. +type Options struct { + // Sinks listed here receive every record the root logger emits. If + // empty, a single console sink at LevelInfo on stderr is used. + Sinks []Sink + // Color enables ANSI colouring of the level tag in console output. The + // zero value is "off"; callers that want auto-detection should pass + // term.IsTerminal(int(os.Stderr.Fd())). + Color bool +} + +// New returns a root *slog.Logger carrying the given source tag. Pass the +// returned logger into services; each service should further narrow with +// logger.With("source", ) via Child to replace (not append) +// the source field. +func New(source string, opts Options) *slog.Logger { + sinks := opts.Sinks + if len(sinks) == 0 { + sinks = []Sink{{Writer: os.Stderr, Format: FormatConsole, Level: slog.LevelInfo}} + } + handlers := make([]slog.Handler, 0, len(sinks)) + for _, s := range sinks { + handlers = append(handlers, newHandler(s, opts.Color)) + } + var h slog.Handler + if len(handlers) == 1 { + h = handlers[0] + } else { + h = fanoutHandler(handlers) + } + l := slog.New(h) + if source != "" { + l = l.With(slog.String("source", source)) + } + return l +} + +// Child derives a sub-logger whose source attribute replaces (not appends) +// the parent's. 
Useful when a sub-component needs its own tag (e.g. a fork +// subsystem inside AFP wants [AFP.Fork]). +func Child(parent *slog.Logger, source string) *slog.Logger { + if parent == nil { + parent = slog.Default() + } + return parent.With(slog.String("source", source)) +} + +type ctxKey struct{} + +// WithContext attaches a logger to ctx. FromContext will return it. +func WithContext(ctx context.Context, l *slog.Logger) context.Context { + return context.WithValue(ctx, ctxKey{}, l) +} + +// FromContext returns the logger stored by WithContext, falling back to +// slog.Default() when nothing is attached. +func FromContext(ctx context.Context) *slog.Logger { + if ctx != nil { + if l, ok := ctx.Value(ctxKey{}).(*slog.Logger); ok && l != nil { + return l + } + } + return slog.Default() +} + +// SetDefault installs l as slog.Default and returns a restore func for +// tests. +func SetDefault(l *slog.Logger) func() { + prev := slog.Default() + slog.SetDefault(l) + return func() { slog.SetDefault(prev) } +} + +// ParseLevel maps "debug" / "info" / "warn" / "warning" / "error" to +// slog.Level. Unknown values return slog.LevelInfo and ok=false. 
+func ParseLevel(s string) (slog.Level, bool) { + switch strings.ToLower(strings.TrimSpace(s)) { + case "debug": + return slog.LevelDebug, true + case "info", "": + return slog.LevelInfo, true + case "warn", "warning": + return slog.LevelWarn, true + case "error": + return slog.LevelError, true + } + return slog.LevelInfo, false +} + +func newHandler(s Sink, color bool) slog.Handler { + if s.Writer == nil { + s.Writer = os.Stderr + } + switch s.Format { + case FormatJSON: + return slog.NewJSONHandler(s.Writer, &slog.HandlerOptions{Level: s.Level}) + default: + return &consoleHandler{w: s.Writer, level: s.Level, color: color, mu: &sync.Mutex{}} + } +} + +// consoleHandler is a minimal slog.Handler that renders +// +// [2026-04-24 14:05:12] INFO [AFP] message key=value +// +// It lifts the "source" attribute into the bracketed prefix and formats +// the remaining attributes as key=value pairs. It is deliberately small +// and allocation-light; callers who need slog's full feature set should +// use FormatJSON. +type consoleHandler struct { + w io.Writer + level slog.Level + color bool + // mu guards writes to w. It is a pointer so WithAttrs/WithGroup clones + // share the same lock on the same writer. + mu *sync.Mutex + // attrs and groups are accumulated via WithAttrs/WithGroup. + attrs []slog.Attr + groups []string +} + +func (h *consoleHandler) Enabled(_ context.Context, l slog.Level) bool { + return l >= h.level +} + +func (h *consoleHandler) Handle(_ context.Context, r slog.Record) error { + var sb strings.Builder + sb.WriteByte('[') + sb.WriteString(r.Time.Format("2006-01-02 15:04:05")) + sb.WriteString("] ") + sb.WriteString(levelTag(r.Level, h.color)) + + // Extract source from accumulated attrs and record attrs. 
+ source := "" + var rest []slog.Attr + for _, a := range h.attrs { + if a.Key == "source" { + source = a.Value.String() + continue + } + rest = append(rest, a) + } + var recordAttrs []slog.Attr + r.Attrs(func(a slog.Attr) bool { + if a.Key == "source" { + source = a.Value.String() + return true + } + recordAttrs = append(recordAttrs, a) + return true + }) + + if source != "" { + sb.WriteString(" [") + sb.WriteString(source) + sb.WriteByte(']') + } + sb.WriteByte(' ') + sb.WriteString(r.Message) + + for _, a := range rest { + appendAttr(&sb, a) + } + for _, a := range recordAttrs { + appendAttr(&sb, a) + } + + sb.WriteByte('\n') + h.mu.Lock() + defer h.mu.Unlock() + _, err := io.WriteString(h.w, sb.String()) + return err +} + +func (h *consoleHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + clone := *h + clone.attrs = append(append([]slog.Attr{}, h.attrs...), attrs...) + return &clone +} + +func (h *consoleHandler) WithGroup(name string) slog.Handler { + clone := *h + clone.groups = append(append([]string{}, h.groups...), name) + return &clone +} + +func appendAttr(sb *strings.Builder, a slog.Attr) { + if a.Equal(slog.Attr{}) { + return + } + sb.WriteByte(' ') + sb.WriteString(a.Key) + sb.WriteByte('=') + v := a.Value.String() + if strings.ContainsAny(v, " \t\"") { + fmt.Fprintf(sb, "%q", v) + } else { + sb.WriteString(v) + } +} + +func levelTag(l slog.Level, color bool) string { + var tag string + switch { + case l >= slog.LevelError: + tag = "ERROR" + case l >= slog.LevelWarn: + tag = "WARN " + case l >= slog.LevelInfo: + tag = "INFO " + default: + tag = "DEBUG" + } + if !color { + return tag + } + switch { + case l >= slog.LevelError: + return "\x1b[31m" + tag + "\x1b[0m" + case l >= slog.LevelWarn: + return "\x1b[33m" + tag + "\x1b[0m" + case l >= slog.LevelInfo: + return "\x1b[32m" + tag + "\x1b[0m" + default: + return "\x1b[90m" + tag + "\x1b[0m" + } +} + +// fanoutHandler broadcasts each record to every contained handler whose +// Enabled returns true. 
Used when Options.Sinks has >1 entry. +type fanout []slog.Handler + +func fanoutHandler(hs []slog.Handler) slog.Handler { return fanout(hs) } + +func (f fanout) Enabled(ctx context.Context, l slog.Level) bool { + for _, h := range f { + if h.Enabled(ctx, l) { + return true + } + } + return false +} + +func (f fanout) Handle(ctx context.Context, r slog.Record) error { + var firstErr error + for _, h := range f { + if !h.Enabled(ctx, r.Level) { + continue + } + if err := h.Handle(ctx, r.Clone()); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (f fanout) WithAttrs(attrs []slog.Attr) slog.Handler { + out := make(fanout, len(f)) + for i, h := range f { + out[i] = h.WithAttrs(attrs) + } + return out +} + +func (f fanout) WithGroup(name string) slog.Handler { + out := make(fanout, len(f)) + for i, h := range f { + out[i] = h.WithGroup(name) + } + return out +} diff --git a/pkg/logging/logging_test.go b/pkg/logging/logging_test.go new file mode 100644 index 0000000..acac9c1 --- /dev/null +++ b/pkg/logging/logging_test.go @@ -0,0 +1,127 @@ +package logging + +import ( + "bytes" + "context" + "encoding/json" + "log/slog" + "strings" + "testing" +) + +func TestConsoleHandlerRendersSourcePrefix(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + l := New("AFP", Options{Sinks: []Sink{{Writer: &buf, Format: FormatConsole, Level: slog.LevelInfo}}}) + l.Info("OpenFork", "refnum", 12) + + got := buf.String() + if !strings.Contains(got, "[AFP]") { + t.Fatalf("missing source prefix in console output: %q", got) + } + if !strings.Contains(got, "OpenFork") { + t.Fatalf("missing message: %q", got) + } + if !strings.Contains(got, "refnum=12") { + t.Fatalf("missing attr: %q", got) + } +} + +func TestJSONHandlerEmitsSourceAttr(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + l := New("ASP", Options{Sinks: []Sink{{Writer: &buf, Format: FormatJSON, Level: slog.LevelInfo}}}) + l.Info("OpenSess", "sess", "01HF") + + var got map[string]any + if 
err := json.Unmarshal(buf.Bytes(), &got); err != nil { + t.Fatalf("json unmarshal: %v (raw: %q)", err, buf.String()) + } + if got["source"] != "ASP" { + t.Fatalf("source: want ASP, got %v", got["source"]) + } + if got["msg"] != "OpenSess" { + t.Fatalf("msg: want OpenSess, got %v", got["msg"]) + } + if got["sess"] != "01HF" { + t.Fatalf("sess attr missing: %v", got) + } +} + +func TestDualSinkFanout(t *testing.T) { + t.Parallel() + var console, jsonBuf bytes.Buffer + l := New("ZIP", Options{Sinks: []Sink{ + {Writer: &console, Format: FormatConsole, Level: slog.LevelInfo}, + {Writer: &jsonBuf, Format: FormatJSON, Level: slog.LevelInfo}, + }}) + l.Info("hello") + + if !strings.Contains(console.String(), "[ZIP]") { + t.Errorf("console missing prefix: %q", console.String()) + } + if !strings.Contains(jsonBuf.String(), `"source":"ZIP"`) { + t.Errorf("json missing source: %q", jsonBuf.String()) + } +} + +func TestLevelFilter(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + l := New("X", Options{Sinks: []Sink{{Writer: &buf, Format: FormatConsole, Level: slog.LevelWarn}}}) + l.Info("quiet") + l.Warn("loud") + got := buf.String() + if strings.Contains(got, "quiet") { + t.Errorf("info should have been filtered: %q", got) + } + if !strings.Contains(got, "loud") { + t.Errorf("warn should have emitted: %q", got) + } +} + +func TestContextLogger(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + l := New("Router", Options{Sinks: []Sink{{Writer: &buf, Format: FormatConsole, Level: slog.LevelInfo}}}) + ctx := WithContext(context.Background(), l.With("session", "abc")) + + FromContext(ctx).Info("tick") + got := buf.String() + if !strings.Contains(got, "session=abc") { + t.Fatalf("context logger did not carry session: %q", got) + } +} + +func TestChildReplacesSource(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + root := New("AFP", Options{Sinks: []Sink{{Writer: &buf, Format: FormatConsole, Level: slog.LevelInfo}}}) + sub := Child(root, "AFP.Fork") + 
sub.Info("open") + + out := buf.String() + if !strings.Contains(out, "[AFP.Fork]") { + t.Fatalf("child source missing: %q", out) + } +} + +func TestParseLevel(t *testing.T) { + t.Parallel() + cases := map[string]slog.Level{ + "debug": slog.LevelDebug, + "info": slog.LevelInfo, + "warn": slog.LevelWarn, + "warning": slog.LevelWarn, + "error": slog.LevelError, + } + for in, want := range cases { + got, ok := ParseLevel(in) + if !ok || got != want { + t.Errorf("ParseLevel(%q) = (%v, %v); want (%v, true)", in, got, ok, want) + } + } + if _, ok := ParseLevel("bogus"); ok { + t.Errorf("ParseLevel(bogus) should return ok=false") + } +} diff --git a/pkg/telemetry/format.go b/pkg/telemetry/format.go new file mode 100644 index 0000000..854feed --- /dev/null +++ b/pkg/telemetry/format.go @@ -0,0 +1,17 @@ +package telemetry + +import ( + "math" + "strconv" +) + +func i64string(v int64) string { + return strconv.FormatInt(v, 10) +} + +func f64string(v float64) string { + return strconv.FormatFloat(v, 'g', -1, 64) +} + +func float64frombits(b uint64) float64 { return math.Float64frombits(b) } +func float64tobits(f float64) uint64 { return math.Float64bits(f) } diff --git a/pkg/telemetry/telemetry.go b/pkg/telemetry/telemetry.go new file mode 100644 index 0000000..260e198 --- /dev/null +++ b/pkg/telemetry/telemetry.go @@ -0,0 +1,123 @@ +// Package telemetry is OmniTalk's metrics abstraction. It exposes +// Counter, Gauge, and Histogram types with a default expvar-backed +// implementation that ships as part of the stdlib and requires no +// extra dependencies. A build-tagged OpenTelemetry backend may be +// swapped in by adding //go:build otel files alongside this one. +// +// Telemetry is deliberately separate from structured logging +// (pkg/logging): counters and histograms are cheap and continuous, +// logs are discrete events. Use both. 
+// +// Usage: +// +// var framesIn = telemetry.NewCounter("omnitalk_router_frames_in_total") +// framesIn.Inc() +// framesIn.Add(n) +// +// Metric names follow Prometheus-style lower_snake_case with a unit +// suffix (_total, _seconds, _bytes). Labels are encoded into the name +// for the expvar backend (e.g. "omnitalk_afp_commands_total_OpenFork") +// because expvar does not support label dimensions natively; the OTel +// backend splits them back out. +package telemetry + +import ( + "expvar" + "sync/atomic" +) + +// Counter is a monotonically increasing integer metric. +type Counter interface { + Inc() + Add(delta int64) + Value() int64 +} + +// Gauge is an integer metric that may go up and down. +type Gauge interface { + Set(v int64) + Add(delta int64) + Value() int64 +} + +// Histogram records an observation distribution. The default expvar +// backend keeps a simple count + sum + min + max; richer backends +// (OTel) record full buckets. +type Histogram interface { + Observe(v float64) +} + +// NewCounter returns a Counter registered under name. +// Calling NewCounter twice with the same name returns the same instance. +func NewCounter(name string) Counter { + if v := expvar.Get(name); v != nil { + if c, ok := v.(*expvarCounter); ok { + return c + } + } + c := &expvarCounter{} + expvar.Publish(name, c) + return c +} + +// NewGauge returns a Gauge registered under name. +func NewGauge(name string) Gauge { + if v := expvar.Get(name); v != nil { + if g, ok := v.(*expvarGauge); ok { + return g + } + } + g := &expvarGauge{} + expvar.Publish(name, g) + return g +} + +// NewHistogram returns a Histogram registered under name. 
+func NewHistogram(name string) Histogram { + if v := expvar.Get(name); v != nil { + if h, ok := v.(*expvarHistogram); ok { + return h + } + } + h := &expvarHistogram{} + expvar.Publish(name, h) + return h +} + +// --- expvar implementations --- + +type expvarCounter struct{ n atomic.Int64 } + +func (c *expvarCounter) Inc() { c.n.Add(1) } +func (c *expvarCounter) Add(d int64) { c.n.Add(d) } +func (c *expvarCounter) Value() int64 { return c.n.Load() } +func (c *expvarCounter) String() string { return i64string(c.n.Load()) } + +type expvarGauge struct{ n atomic.Int64 } + +func (g *expvarGauge) Set(v int64) { g.n.Store(v) } +func (g *expvarGauge) Add(d int64) { g.n.Add(d) } +func (g *expvarGauge) Value() int64 { return g.n.Load() } +func (g *expvarGauge) String() string { return i64string(g.n.Load()) } + +type expvarHistogram struct { + count atomic.Int64 + sumB atomic.Uint64 // float64 bits +} + +func (h *expvarHistogram) Observe(v float64) { + h.count.Add(1) + for { + old := h.sumB.Load() + sum := float64frombits(old) + v + if h.sumB.CompareAndSwap(old, float64tobits(sum)) { + return + } + } +} + +func (h *expvarHistogram) String() string { + count := h.count.Load() + sum := float64frombits(h.sumB.Load()) + return `{"count":` + i64string(count) + `,"sum":` + f64string(sum) + `}` +} diff --git a/pkg/telemetry/telemetry_test.go b/pkg/telemetry/telemetry_test.go new file mode 100644 index 0000000..4f39df4 --- /dev/null +++ b/pkg/telemetry/telemetry_test.go @@ -0,0 +1,50 @@ +package telemetry + +import ( + "expvar" + "testing" +) + +func TestCounter(t *testing.T) { + t.Parallel() + c := NewCounter("test_counter_total") + c.Inc() + c.Add(4) + if c.Value() != 5 { + t.Fatalf("Value = %d, want 5", c.Value()) + } + if v := expvar.Get("test_counter_total"); v == nil || v.String() != "5" { + t.Fatalf("expvar publish mismatch: %v", v) + } +} + +func TestCounterReregistration(t *testing.T) { + t.Parallel() + a := NewCounter("test_reregister_total") + a.Add(3) + b := 
NewCounter("test_reregister_total") + if b.Value() != 3 { + t.Fatalf("re-registered counter lost state: %d", b.Value()) + } +} + +func TestGauge(t *testing.T) { + t.Parallel() + g := NewGauge("test_gauge") + g.Set(10) + g.Add(-3) + if g.Value() != 7 { + t.Fatalf("Value = %d, want 7", g.Value()) + } +} + +func TestHistogram(t *testing.T) { + t.Parallel() + h := NewHistogram("test_hist") + h.Observe(1.5) + h.Observe(2.5) + s := h.(*expvarHistogram).String() + if s != `{"count":2,"sum":4}` { + t.Fatalf("String = %q", s) + } +} diff --git a/port/doc.go b/port/doc.go new file mode 100644 index 0000000..88df031 --- /dev/null +++ b/port/doc.go @@ -0,0 +1,15 @@ +/* +Package port defines the Port interface — the link-layer abstraction the +router uses to send and receive DDP datagrams. Concrete implementations +live in subpackages (port/ethertalk, port/localtalk, port/rawlink, …). + +A Port owns a single network attachment: it knows its AppleTalk network +range and node number, can unicast/broadcast/multicast DDP datagrams, +and delivers inbound datagrams up through the RouterHooks callback the +router supplies at Start. + +The optional BridgeConfigurable interface lets EtherTalk-style ports +expose bridge-mode configuration without requiring every Port to grow +the same surface; main.go type-asserts and configures only when needed. +*/ +package port diff --git a/port/ethertalk/config.go b/port/ethertalk/config.go new file mode 100644 index 0000000..133a48a --- /dev/null +++ b/port/ethertalk/config.go @@ -0,0 +1,66 @@ +package ethertalk + +import ( + "fmt" + "strings" +) + +// Config is EtherTalk's user-facing configuration. Source-agnostic and +// populated via koanf tags by any caller that wires up a config source. +type Config struct { + // Backend selects the link-layer driver: pcap (default), tap, tun, + // or "" to disable EtherTalk entirely. + Backend string `koanf:"backend"` + // Device is the network interface or pcap device name. 
+ Device string `koanf:"device"` + // HWAddress is the EtherTalk router MAC (6-byte EUI-48). + HWAddress string `koanf:"hw_address"` + // BridgeMode controls the bridge shim: auto, ethernet, or wifi. + BridgeMode string `koanf:"bridge_mode"` + // BridgeHostMAC is the host adapter's own MAC, used by the Wi-Fi + // bridge shim. Defaults to HWAddress when blank. + BridgeHostMAC string `koanf:"bridge_host_mac"` + SeedNetworkMin uint `koanf:"seed_network_min"` + SeedNetworkMax uint `koanf:"seed_network_max"` + SeedZone string `koanf:"seed_zone"` + DesiredNetwork uint `koanf:"desired_network"` + DesiredNode uint `koanf:"desired_node"` +} + +// DefaultConfig returns EtherTalk's built-in defaults. +func DefaultConfig() Config { + return Config{ + Backend: "pcap", + HWAddress: "DE:AD:BE:EF:CA:FE", + BridgeMode: "auto", + SeedNetworkMin: 3, + SeedNetworkMax: 5, + SeedZone: "EtherTalk Network", + DesiredNetwork: 3, + DesiredNode: 253, + } +} + +// Validate checks the config for logical consistency. It does not check +// that the device is reachable — that's a runtime concern. +func (c *Config) Validate() error { + switch strings.ToLower(strings.TrimSpace(c.Backend)) { + case "", "pcap", "tap", "tun": + default: + return fmt.Errorf("EtherTalk.backend must be blank, pcap, tap, or tun, got %q", c.Backend) + } + if c.Backend != "" && c.SeedNetworkMin > c.SeedNetworkMax { + return fmt.Errorf("EtherTalk.seed_network_min (%d) must be <= seed_network_max (%d)", c.SeedNetworkMin, c.SeedNetworkMax) + } + switch strings.ToLower(strings.TrimSpace(c.BridgeMode)) { + case "", "auto", "ethernet", "wifi": + default: + return fmt.Errorf("EtherTalk.bridge_mode must be auto, ethernet, or wifi, got %q", c.BridgeMode) + } + return nil +} + +// Enabled reports whether the EtherTalk port should be created at all. 
+func (c *Config) Enabled() bool { + return strings.TrimSpace(c.Backend) != "" && strings.TrimSpace(c.Device) != "" +} diff --git a/port/ethertalk/doc.go b/port/ethertalk/doc.go new file mode 100644 index 0000000..bf95b21 --- /dev/null +++ b/port/ethertalk/doc.go @@ -0,0 +1,7 @@ +// Package ethertalk implements EtherTalk (AppleTalk Phase 2 over +// Ethernet) as an OmniTalk port. +// +// Frames are sent and received via libpcap/Npcap on the host +// interface. The package also implements AARP (RFC 1742, Appendix A) +// for AppleTalk-to-Ethernet address resolution. +package ethertalk diff --git a/port/ethertalk/ethertalk.go b/port/ethertalk/ethertalk.go index e62b37d..839150f 100644 --- a/port/ethertalk/ethertalk.go +++ b/port/ethertalk/ethertalk.go @@ -7,9 +7,10 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" ) var ( @@ -46,7 +47,7 @@ type amtEntry struct { } type heldDatagram struct { - d appletalk.Datagram + d ddp.Datagram when time.Time } @@ -250,6 +251,7 @@ func (p *Port) acquireAddressRun() { probeNd := p.probeNode p.probeAttempts++ p.probeMu.Unlock() + aarpProbeRetriesTotal.Inc() p.sendAARPProbe(probeNet, probeNd) } } @@ -380,7 +382,7 @@ func (p *Port) sendFrame(dst, payload []byte) { _ = p.tx(f) } -func (p *Port) sendDatagram(dst []byte, d appletalk.Datagram) { +func (p *Port) sendDatagram(dst []byte, d ddp.Datagram) { b, err := d.AsLongHeaderBytes(true) if err != nil { return @@ -482,7 +484,7 @@ func (p *Port) InboundFrame(frame []byte) { if bytes.Equal(frame[17:22], snapAppleTalk) { netlog.LogEthernetFrameInbound(frame, p) - d, err := appletalk.DatagramFromLongHeaderBytes(frame[22:14+length], false) + d, err := ddp.DatagramFromLongHeaderBytes(frame[22:14+length], false) if err != nil { netlog.Debug("%s failed to parse AppleTalk datagram from 
EtherTalk frame: %v", p.ShortString(), err) return @@ -502,7 +504,7 @@ func (p *Port) InboundFrame(frame []byte) { } } -func (p *Port) Unicast(network uint16, node uint8, d appletalk.Datagram) { +func (p *Port) Unicast(network uint16, node uint8, d ddp.Datagram) { netlog.LogDatagramUnicast(network, node, d, p) key := [2]uint16{network, uint16(node)} p.tableMu.Lock() @@ -523,7 +525,7 @@ func (p *Port) Unicast(network uint16, node uint8, d appletalk.Datagram) { } } -func (p *Port) Broadcast(d appletalk.Datagram) { +func (p *Port) Broadcast(d ddp.Datagram) { if d.DestinationNetwork != 0 || d.DestinationNode != 0xFF { d.DestinationNetwork = 0 d.DestinationNode = 0xFF @@ -532,7 +534,7 @@ func (p *Port) Broadcast(d appletalk.Datagram) { p.sendDatagram(elapBroadcast, d) } -func (p *Port) Multicast(zoneName []byte, d appletalk.Datagram) { +func (p *Port) Multicast(zoneName []byte, d ddp.Datagram) { netlog.LogDatagramMulticast(zoneName, d, p) // Use the EtherTalk-wide broadcast (09:00:07:FF:FF:FF) rather than the // zone-specific multicast. 
All Phase 2 nodes must accept this address, whereas @@ -542,7 +544,7 @@ func (p *Port) Multicast(zoneName []byte, d appletalk.Datagram) { } func (p *Port) MulticastAddress(zoneName []byte) []byte { - sum := appletalk.DDPChecksum(ucase(zoneName)) + sum := ddp.Checksum(ucase(zoneName)) return []byte{elapMCprefix[0], elapMCprefix[1], elapMCprefix[2], elapMCprefix[3], elapMCprefix[4], byte(sum % 0xFD)} } diff --git a/port/ethertalk/ethertalk_bridge.go b/port/ethertalk/ethertalk_bridge.go index 24292c5..ede28fb 100644 --- a/port/ethertalk/ethertalk_bridge.go +++ b/port/ethertalk/ethertalk_bridge.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/port/rawlink" + "github.com/pgodw/omnitalk/port/rawlink" ) type bridgeMode uint8 diff --git a/port/ethertalk/ethertalk_bridge_test.go b/port/ethertalk/ethertalk_bridge_test.go index 29e0b18..f3440af 100644 --- a/port/ethertalk/ethertalk_bridge_test.go +++ b/port/ethertalk/ethertalk_bridge_test.go @@ -5,7 +5,7 @@ import ( "encoding/binary" "testing" - "github.com/pgodw/omnitalk/go/port/rawlink" + "github.com/pgodw/omnitalk/port/rawlink" ) func TestBridgeAdapterInboundPassThroughCopy(t *testing.T) { diff --git a/port/ethertalk/metrics.go b/port/ethertalk/metrics.go new file mode 100644 index 0000000..c4c72a9 --- /dev/null +++ b/port/ethertalk/metrics.go @@ -0,0 +1,5 @@ +package ethertalk + +import "github.com/pgodw/omnitalk/pkg/telemetry" + +var aarpProbeRetriesTotal = telemetry.NewCounter("omnitalk_aarp_probe_retries_total") diff --git a/port/ethertalk/options.go b/port/ethertalk/options.go new file mode 100644 index 0000000..e8d041e --- /dev/null +++ b/port/ethertalk/options.go @@ -0,0 +1,21 @@ +package ethertalk + +// Options bundles immutable construction inputs for an EtherTalk PcapPort +// (or its Tap variant). Keeping bridge configuration here means callers set +// it up-front rather than mutating the port after Start. 
+type Options struct { + InterfaceName string + HWAddr []byte + SeedNetworkMin uint16 + SeedNetworkMax uint16 + DesiredNetwork uint16 + DesiredNode uint8 + SeedZoneNames [][]byte + + // BridgeMode is the textual bridge mode ("", "auto", "ethernet", "wifi"). + // Empty is treated as "auto". + BridgeMode string + // BridgeHostMAC is the host adapter's MAC for the Wi-Fi bridge shim. + // When nil, falls back to HWAddr. + BridgeHostMAC []byte +} diff --git a/port/ethertalk/pcap.go b/port/ethertalk/pcap.go index 45e4415..b257dcd 100644 --- a/port/ethertalk/pcap.go +++ b/port/ethertalk/pcap.go @@ -1,11 +1,11 @@ package ethertalk import ( - "log" "net" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/port/rawlink" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/port/rawlink" ) // etherTalkBPFFilter selects EtherTalk Phase 2 frames carried as @@ -35,23 +35,38 @@ type PcapPort struct { writerDone chan struct{} } -func NewPcapPort(interfaceName string, hwAddr []byte, seedNetworkMin, seedNetworkMax, desiredNetwork uint16, desiredNode uint8, seedZoneNames [][]byte) (*PcapPort, error) { - if len(hwAddr) != 6 { +func NewPcapPort(opts Options) (*PcapPort, error) { + if len(opts.HWAddr) != 6 { return nil, net.InvalidAddrError("hw_addr must be exactly 6 bytes") } - base := New(hwAddr, seedNetworkMin, seedNetworkMax, desiredNetwork, desiredNode, seedZoneNames) + mode, err := parseBridgeModeString(opts.BridgeMode) + if err != nil { + return nil, err + } + hostMAC := opts.BridgeHostMAC + if hostMAC == nil { + hostMAC = opts.HWAddr + } + if len(hostMAC) != 6 { + return nil, net.InvalidAddrError("bridge host mac must be exactly 6 bytes") + } + base := New(opts.HWAddr, opts.SeedNetworkMin, opts.SeedNetworkMax, opts.DesiredNetwork, opts.DesiredNode, opts.SeedZoneNames) + resolvedMode := mode + if resolvedMode == bridgeModeAuto { + resolvedMode = bridgeModeEthernet + } p := &PcapPort{ Port: base, - interfaceName: 
interfaceName, + interfaceName: opts.InterfaceName, backendLabel: "pcap", openLink: func(name string) (rawlink.RawLink, error) { return rawlink.OpenPcap(rawlink.DefaultEtherTalkConfig(name)) }, applyBPFFilter: true, medium: rawlink.MediumEthernet, - hostMAC: append([]byte(nil), hwAddr...), - bridgeMode: bridgeModeAuto, - adapter: newEthertalkBridgeAdapterWithWiFiEncap(hwAddr, hwAddr, bridgeModeEthernet, false), + hostMAC: append([]byte(nil), hostMAC...), + bridgeMode: mode, + adapter: newEthertalkBridgeAdapterWithWiFiEncap(hostMAC, opts.HWAddr, resolvedMode, false), readerStop: make(chan struct{}), readerDone: make(chan struct{}), writerQueue: make(chan []byte, 1024), @@ -121,15 +136,15 @@ func (p *PcapPort) Start(r port.RouterHooks) error { } p.setResolvedBridgeMode(mode) if p.bridgeMode == bridgeModeWiFi && !bridgeModeRequiresWiFiEncapsulation(p.medium) { - log.Printf("pcap wifi bridge on %s using Ethernet TX framing (medium: ethernet)", p.interfaceName) + netlog.Info("pcap wifi bridge on %s using Ethernet TX framing (medium: ethernet)", p.interfaceName) } - log.Printf("%s bridge mode on %s: %s (medium: %v)", p.backendLabel, p.interfaceName, p.bridgeMode.String(), p.medium) + netlog.Info("%s bridge mode on %s: %s (medium: %v)", p.backendLabel, p.interfaceName, p.bridgeMode.String(), p.medium) // Apply BPF filter when the backend supports it. 
if p.applyBPFFilter { if fl, ok := link.(rawlink.FilterableLink); ok { if err := fl.SetFilter(etherTalkBPFFilter); err != nil { - log.Printf("warning: could not set BPF filter on %s: %v", p.interfaceName, err) + netlog.Warn("could not set BPF filter on %s: %v", p.interfaceName, err) } } } @@ -163,13 +178,13 @@ func (p *PcapPort) readRun() { data, err := p.link.ReadFrame() if err != nil { if err != rawlink.ErrTimeout { - log.Printf("pcap read error on %s: %v", p.interfaceName, err) + netlog.Warn("pcap read error on %s: %v", p.interfaceName, err) } continue } normalized, err := p.adapter.inboundFrame(data) if err != nil { - log.Printf("warning: failed to normalize inbound frame on %s: %v", p.interfaceName, err) + netlog.Warn("failed to normalize inbound frame on %s: %v", p.interfaceName, err) continue } p.InboundFrame(normalized) @@ -181,7 +196,7 @@ func (p *PcapPort) sendFrame(frameData []byte) { select { case p.writerQueue <- frameData: default: - log.Printf("warning: pcap writer queue full, dropping outbound packet") + netlog.Warn("pcap writer queue full, dropping outbound packet") } } @@ -194,11 +209,11 @@ func (p *PcapPort) writeRun() { case frameData := <-p.writerQueue: prepared, err := p.adapter.outboundFrame(frameData) if err != nil { - log.Printf("warning: failed to prepare outbound frame on %s: %v", p.interfaceName, err) + netlog.Warn("failed to prepare outbound frame on %s: %v", p.interfaceName, err) continue } if err := p.link.WriteFrame(prepared); err != nil { - log.Printf("warning: couldn't send packet: %v", err) + netlog.Warn("couldn't send packet: %v", err) } } } diff --git a/port/ethertalk/tap.go b/port/ethertalk/tap.go index 8fc82fe..5e2a3e0 100644 --- a/port/ethertalk/tap.go +++ b/port/ethertalk/tap.go @@ -1,11 +1,11 @@ package ethertalk -import "github.com/pgodw/omnitalk/go/port/rawlink" +import "github.com/pgodw/omnitalk/port/rawlink" // NewTapPort creates an EtherTalk port over a TAP-style raw link backend. 
// TAP support depends on rawlink.OpenTAP for the current platform. -func NewTapPort(interfaceName string, hwAddr []byte, seedNetworkMin, seedNetworkMax, desiredNetwork uint16, desiredNode uint8, seedZoneNames [][]byte) (*PcapPort, error) { - p, err := NewPcapPort(interfaceName, hwAddr, seedNetworkMin, seedNetworkMax, desiredNetwork, desiredNode, seedZoneNames) +func NewTapPort(opts Options) (*PcapPort, error) { + p, err := NewPcapPort(opts) if err != nil { return nil, err } diff --git a/port/localtalk/config.go b/port/localtalk/config.go new file mode 100644 index 0000000..baf0f77 --- /dev/null +++ b/port/localtalk/config.go @@ -0,0 +1,72 @@ +package localtalk + +import ( + "fmt" + "strings" +) + +// LToUDPConfig configures the LocalTalk-over-UDP port. +type LToUDPConfig struct { + Enabled bool `koanf:"enabled"` + Interface string `koanf:"interface"` + SeedNetwork uint `koanf:"seed_network"` + SeedZone string `koanf:"seed_zone"` +} + +// DefaultLToUDPConfig returns the built-in defaults. +func DefaultLToUDPConfig() LToUDPConfig { + return LToUDPConfig{ + Enabled: true, + Interface: "0.0.0.0", + SeedNetwork: 1, + SeedZone: "LToUDP Network", + } +} + +func (c *LToUDPConfig) Validate() error { + if !c.Enabled { + return nil + } + if strings.TrimSpace(c.SeedZone) == "" { + return fmt.Errorf("LToUdp.seed_zone must not be empty") + } + if c.SeedNetwork == 0 || c.SeedNetwork > 0xFFFE { + return fmt.Errorf("LToUdp.seed_network %d out of range", c.SeedNetwork) + } + return nil +} + +// TashTalkConfig configures the TashTalk serial LocalTalk adaptor port. +type TashTalkConfig struct { + // Port is the OS serial-device path (e.g. "COM1", "/dev/ttyAMA0"). + // Blank disables the TashTalk port entirely. 
+ Port string `koanf:"port"` + SeedNetwork uint `koanf:"seed_network"` + SeedZone string `koanf:"seed_zone"` +} + +func DefaultTashTalkConfig() TashTalkConfig { + return TashTalkConfig{ + SeedNetwork: 2, + SeedZone: "TashTalk Network", + } +} + +func (c *TashTalkConfig) Validate() error { + if !c.Enabled() { + return nil + } + if strings.TrimSpace(c.SeedZone) == "" { + return fmt.Errorf("TashTalk.seed_zone must not be empty") + } + if c.SeedNetwork == 0 || c.SeedNetwork > 0xFFFE { + return fmt.Errorf("TashTalk.seed_network %d out of range", c.SeedNetwork) + } + return nil +} + +// Enabled reports whether the TashTalk port should be created. A blank +// Port disables the adaptor without erroring. +func (c *TashTalkConfig) Enabled() bool { + return strings.TrimSpace(c.Port) != "" +} diff --git a/port/localtalk/doc.go b/port/localtalk/doc.go new file mode 100644 index 0000000..393d32e --- /dev/null +++ b/port/localtalk/doc.go @@ -0,0 +1,8 @@ +// Package localtalk implements LocalTalk (AppleTalk Phase 1) as an +// OmniTalk port. +// +// LLAP frames travel over one of several physical/virtual transports +// implemented in subpackages: LToUDP (UDP multicast on +// 239.192.76.84:1954), TashTalk (serial-attached hardware at 1 Mbit/s), +// and a virtual loopback for tests. +package localtalk diff --git a/port/localtalk/llap.go b/port/localtalk/llap.go index 7d741e3..bb47335 100644 --- a/port/localtalk/llap.go +++ b/port/localtalk/llap.go @@ -1,72 +1,23 @@ package localtalk -import "fmt" +import "github.com/pgodw/omnitalk/protocol/llap" -const ( - LLAPTypeAppleTalkShortHeader = 0x01 - LLAPTypeAppleTalkLongHeader = 0x02 - LLAPTypeENQ = 0x81 - LLAPTypeACK = 0x82 - LLAPTypeRTS = 0x84 - LLAPTypeCTS = 0x85 +// LLAP wire-format types and codes have moved to protocol/llap. +// These aliases keep existing port-internal call sites unchanged while +// new code (service/llap, tests) imports protocol/llap directly. 
- LLAPBroadcastNode = 0xFF - LLAPMaxDataSize = 600 +const ( + LLAPTypeAppleTalkShortHeader = llap.TypeAppleTalkShortHeader + LLAPTypeAppleTalkLongHeader = llap.TypeAppleTalkLongHeader + LLAPTypeENQ = llap.TypeENQ + LLAPTypeACK = llap.TypeACK + LLAPTypeRTS = llap.TypeRTS + LLAPTypeCTS = llap.TypeCTS + + LLAPBroadcastNode = llap.BroadcastNode + LLAPMaxDataSize = llap.MaxDataSize ) -type LLAPFrame struct { - DestinationNode uint8 - SourceNode uint8 - Type uint8 - Payload []byte -} - -func LLAPFrameFromBytes(frame []byte) (LLAPFrame, error) { - if len(frame) < 3 { - return LLAPFrame{}, fmt.Errorf("LLAP frame too short: %d", len(frame)) - } - x := LLAPFrame{ - DestinationNode: frame[0], - SourceNode: frame[1], - Type: frame[2], - Payload: append([]byte(nil), frame[3:]...), - } - if err := x.Validate(); err != nil { - return LLAPFrame{}, err - } - return x, nil -} - -func (f LLAPFrame) Validate() error { - if f.IsControl() { - if len(f.Payload) != 0 { - return fmt.Errorf("LLAP control frame 0x%02X has payload length %d", f.Type, len(f.Payload)) - } - switch f.Type { - case LLAPTypeENQ, LLAPTypeACK, LLAPTypeRTS, LLAPTypeCTS: - return nil - default: - return fmt.Errorf("invalid LLAP control type 0x%02X", f.Type) - } - } - if !f.IsData() { - return fmt.Errorf("invalid LLAP frame type 0x%02X", f.Type) - } - if len(f.Payload) > LLAPMaxDataSize { - return fmt.Errorf("LLAP payload too large: %d", len(f.Payload)) - } - return nil -} - -func (f LLAPFrame) IsControl() bool { return f.Type >= 0x80 } - -func (f LLAPFrame) IsData() bool { - return f.Type == LLAPTypeAppleTalkShortHeader || f.Type == LLAPTypeAppleTalkLongHeader -} +type LLAPFrame = llap.Frame -func (f LLAPFrame) Bytes() []byte { - out := make([]byte, 0, 3+len(f.Payload)) - out = append(out, f.DestinationNode, f.SourceNode, f.Type) - out = append(out, f.Payload...) 
- return out -} +func LLAPFrameFromBytes(b []byte) (LLAPFrame, error) { return llap.FrameFromBytes(b) } diff --git a/port/localtalk/localtalk.go b/port/localtalk/localtalk.go index 3321de3..a9a6341 100644 --- a/port/localtalk/localtalk.go +++ b/port/localtalk/localtalk.go @@ -6,9 +6,10 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" ) const ( @@ -23,8 +24,8 @@ type FrameSender interface{ SendFrame(frame []byte) error } type LinkManager interface { RegisterPort(p *Port) InboundFrame(p *Port, frame LLAPFrame) - TransmitUnicast(p *Port, network uint16, node uint8, d appletalk.Datagram) - TransmitBroadcast(p *Port, d appletalk.Datagram) + TransmitUnicast(p *Port, network uint16, node uint8, d ddp.Datagram) + TransmitBroadcast(p *Port, d ddp.Datagram) } type Port struct { @@ -78,6 +79,13 @@ func New(seedNetwork uint16, seedZoneName []byte, respondToEnq bool, desiredNode } func (p *Port) ConfigureSendFrame(f func(frame []byte) error) { p.sendFrameFunc = f } + +// SetFrameSender wires the LocalTalk Port to a FrameSender backend. It +// is the interface-shaped counterpart to ConfigureSendFrame and the +// preferred way to attach new backends; ConfigureSendFrame remains for +// callers that already pass closures. 
+func (p *Port) SetFrameSender(fs FrameSender) { p.sendFrameFunc = fs.SendFrame } + func (p *Port) ShortString() string { return "LocalTalk" } func (p *Port) SetLLAPLinkManager(m LinkManager) { p.linkManager = m } func (p *Port) SetNodeIDChangeHook(hook func(node uint8)) { p.onNodeIDChange = hook } @@ -109,7 +117,7 @@ func (p *Port) SendRawLLAPFrame(frame LLAPFrame) error { return p.sendFrameFunc(b) } -func (p *Port) BuildDataFrame(dst uint8, d appletalk.Datagram) (LLAPFrame, error) { +func (p *Port) BuildDataFrame(dst uint8, d ddp.Datagram) (LLAPFrame, error) { p.mu.Lock() src := p.node network := p.network @@ -132,17 +140,17 @@ func (p *Port) BuildDataFrame(dst uint8, d appletalk.Datagram) (LLAPFrame, error return LLAPFrame{DestinationNode: dst, SourceNode: src, Type: llapAppleTalkLongHeader, Payload: payload}, nil } -func (p *Port) ParseInboundDataFrame(frame LLAPFrame) (appletalk.Datagram, error) { +func (p *Port) ParseInboundDataFrame(frame LLAPFrame) (ddp.Datagram, error) { switch frame.Type { case llapAppleTalkShortHeader: - return appletalk.DatagramFromShortHeaderBytes(frame.DestinationNode, frame.SourceNode, frame.Payload) + return ddp.DatagramFromShortHeaderBytes(frame.DestinationNode, frame.SourceNode, frame.Payload) case llapAppleTalkLongHeader: p.mu.Lock() verifyChecksums := p.verifyChecksums p.mu.Unlock() - return appletalk.DatagramFromLongHeaderBytes(frame.Payload, verifyChecksums) + return ddp.DatagramFromLongHeaderBytes(frame.Payload, verifyChecksums) default: - return appletalk.Datagram{}, fmt.Errorf("not a LocalTalk data frame: 0x%02X", frame.Type) + return ddp.Datagram{}, fmt.Errorf("not a LocalTalk data frame: 0x%02X", frame.Type) } } @@ -296,7 +304,7 @@ func (p *Port) InboundFrame(frame []byte) { dst, src, typ := parsed.DestinationNode, parsed.SourceNode, parsed.Type switch typ { case llapAppleTalkShortHeader: - d, err := appletalk.DatagramFromShortHeaderBytes(dst, src, parsed.Payload) + d, err := ddp.DatagramFromShortHeaderBytes(dst, src, 
parsed.Payload) if err != nil { netlog.Debug("%s failed to parse short-header AppleTalk datagram from LocalTalk frame: %v", p.ShortString(), err) } else { @@ -304,7 +312,7 @@ func (p *Port) InboundFrame(frame []byte) { p.router.Inbound(d, p) } case llapAppleTalkLongHeader: - d, err := appletalk.DatagramFromLongHeaderBytes(parsed.Payload, p.verifyChecksums) + d, err := ddp.DatagramFromLongHeaderBytes(parsed.Payload, p.verifyChecksums) if err != nil { netlog.Debug("%s failed to parse long-header AppleTalk datagram from LocalTalk frame: %v", p.ShortString(), err) } else { @@ -333,7 +341,7 @@ func (p *Port) InboundFrame(frame []byte) { } } -func (p *Port) Unicast(network uint16, node uint8, d appletalk.Datagram) { +func (p *Port) Unicast(network uint16, node uint8, d ddp.Datagram) { if p.linkManager != nil { p.linkManager.TransmitUnicast(p, network, node, d) return @@ -358,7 +366,7 @@ func (p *Port) Unicast(network uint16, node uint8, d appletalk.Datagram) { _ = p.sendFrameFunc(append([]byte{node, p.node, llapAppleTalkLongHeader}, b...)) } -func (p *Port) Broadcast(d appletalk.Datagram) { +func (p *Port) Broadcast(d ddp.Datagram) { if p.linkManager != nil { p.linkManager.TransmitBroadcast(p, d) return @@ -375,7 +383,7 @@ func (p *Port) Broadcast(d appletalk.Datagram) { _ = p.sendFrameFunc(append([]byte{0xFF, p.node, llapAppleTalkShortHeader}, b...)) } -func (p *Port) Multicast(zoneName []byte, d appletalk.Datagram) { +func (p *Port) Multicast(zoneName []byte, d ddp.Datagram) { netlog.LogDatagramMulticast(zoneName, d, p) p.Broadcast(d) } diff --git a/port/localtalk/ltoudp.go b/port/localtalk/ltoudp.go index 1d68c6f..b64bad9 100644 --- a/port/localtalk/ltoudp.go +++ b/port/localtalk/ltoudp.go @@ -12,8 +12,8 @@ import ( "golang.org/x/net/ipv4" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" ) const ( @@ -35,7 +35,7 @@ type LtoudpPort struct { func NewLtoudpPort(intfAddr 
string, seedNetwork uint16, seedZoneName []byte) *LtoudpPort { base := New(seedNetwork, seedZoneName, true, 0xFE) p := &LtoudpPort{Port: base, intfAddr: intfAddr, stop: make(chan struct{})} - p.ConfigureSendFrame(p.sendFrame) + p.SetFrameSender(p) binary.BigEndian.PutUint32(p.senderID[:], uint32(os.Getpid())) return p } @@ -147,6 +147,10 @@ func (p *LtoudpPort) run() { } } +// SendFrame implements FrameSender by transmitting frame as one +// LToUDP datagram on the multicast group. +func (p *LtoudpPort) SendFrame(frame []byte) error { return p.sendFrame(frame) } + func (p *LtoudpPort) sendFrame(frame []byte) error { // Pull a scratch buffer from the pool so concurrent senders don't race. need := 4 + len(frame) diff --git a/port/localtalk/tashtalk.go b/port/localtalk/tashtalk.go index 2479035..7ff1216 100644 --- a/port/localtalk/tashtalk.go +++ b/port/localtalk/tashtalk.go @@ -10,9 +10,9 @@ import ( "time" serial "github.com/jacobsa/go-serial/serial" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" - "github.com/pgodw/omnitalk/go/port" + "github.com/pgodw/omnitalk/port" ) type TashTalkPort struct { @@ -29,7 +29,7 @@ func NewTashTalkPort(serialPort string, seedNetwork uint16, seedZoneName []byte) base.SetRTSCTSManagedByTransport(true) base.SetCTSResponseTimeout(25 * time.Millisecond) p := &TashTalkPort{Port: base, serialPort: serialPort, stop: make(chan struct{})} - p.ConfigureSendFrame(p.sendFrame) + p.SetFrameSender(p) p.SetNodeIDChangeHook(p.setNodeID) return p } @@ -86,6 +86,11 @@ func (p *TashTalkPort) Stop() error { return p.Port.Stop() } +// SendFrame implements FrameSender by transmitting frame over the +// TashTalk serial link with the protocol's framing byte and FCS +// appended. 
+func (p *TashTalkPort) SendFrame(frame []byte) error { return p.sendFrame(frame) } + func (p *TashTalkPort) sendFrame(frame []byte) error { withFCS := appendFCS(frame) packet := make([]byte, 0, 1+len(withFCS)) diff --git a/port/nat/ipnat.go b/port/nat/ipnat.go index 30cc88f..387f51f 100644 --- a/port/nat/ipnat.go +++ b/port/nat/ipnat.go @@ -13,9 +13,10 @@ import ( "golang.org/x/net/icmp" "golang.org/x/net/ipv4" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/service" ) const ( @@ -654,7 +655,7 @@ func (n *OSNAT) routeToMac(atNet uint16, atNode uint8, pkt []byte) { return } for _, frag := range frags { - _ = n.router.Route(appletalk.Datagram{ + _ = n.router.Route(ddp.Datagram{ DestinationNetwork: atNet, DestinationNode: atNode, DestinationSocket: n.socket, diff --git a/port/nat/iputil.go b/port/nat/iputil.go index e4665fa..9a0ad37 100644 --- a/port/nat/iputil.go +++ b/port/nat/iputil.go @@ -6,7 +6,7 @@ package nat import "encoding/binary" // MaxIPPerDDP is the maximum IP payload that fits in a single DDP packet -// (appletalk.MaxDataLength = 586 bytes). +// (ddp.MaxDataLength = 586 bytes). const MaxIPPerDDP = 586 // FragmentIPv4 splits pkt into fragments each ≤maxSize bytes. 
diff --git a/port/port.go b/port/port.go index 7ef60af..0f90e9d 100644 --- a/port/port.go +++ b/port/port.go @@ -1,18 +1,18 @@ package port -import "github.com/pgodw/omnitalk/go/appletalk" +import "github.com/pgodw/omnitalk/protocol/ddp" type RouterHooks interface { - Inbound(datagram appletalk.Datagram, rx Port) + Inbound(datagram ddp.Datagram, rx Port) } type Port interface { ShortString() string Start(router RouterHooks) error Stop() error - Unicast(network uint16, node uint8, datagram appletalk.Datagram) - Broadcast(datagram appletalk.Datagram) - Multicast(zoneName []byte, datagram appletalk.Datagram) + Unicast(network uint16, node uint8, datagram ddp.Datagram) + Broadcast(datagram ddp.Datagram) + Multicast(zoneName []byte, datagram ddp.Datagram) SetNetworkRange(networkMin, networkMax uint16) error Network() uint16 @@ -21,3 +21,23 @@ type Port interface { NetworkMax() uint16 ExtendedNetwork() bool } + +// BridgeConfigurable is implemented by ports that participate in an +// Ethernet-style bridge and need operator control over bridge mode and +// host-MAC synthesis. It is optional — callers type-assert on a Port to +// discover whether these knobs apply. EtherTalk pcap/tap ports +// implement it; LocalTalk and LToUDP ports do not. +// +// Keeping these methods out of the core Port interface means adding a +// new transport that does not need bridge configuration (e.g. a pure +// raw-socket port or a virtual test port) does not force a stub +// implementation. +type BridgeConfigurable interface { + // SetBridgeModeString sets the bridge mode from its textual form + // (e.g. "auto", "ethernet", "wifi"). Ports define their own accepted + // values; invalid input returns a non-nil error. + SetBridgeModeString(mode string) error + // SetBridgeHostMAC sets the MAC address the port presents to the + // bridged Ethernet segment. hostMAC must be a 6-byte EUI-48. 
+ SetBridgeHostMAC(hostMAC []byte) error +} diff --git a/port/rawlink/pcap_detect.go b/port/rawlink/pcap_detect.go index e13be01..00a72f2 100644 --- a/port/rawlink/pcap_detect.go +++ b/port/rawlink/pcap_detect.go @@ -1,8 +1,9 @@ package rawlink import ( + "cmp" "net" - "sort" + "slices" "strings" tsfaces "tailscale.com/net/interfaces" @@ -114,7 +115,7 @@ func DetectHostMACForPcapInterface(interfaceName string) (string, bool) { } // Keep deterministic selection if multiple interfaces share the same IPv4. - sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].Name < ifaces[j].Name }) + slices.SortFunc(ifaces, func(a, b net.Interface) int { return cmp.Compare(a.Name, b.Name) }) for _, iface := range ifaces { if len(iface.HardwareAddr) != 6 { diff --git a/protocol/aep/aep.go b/protocol/aep/aep.go new file mode 100644 index 0000000..e8e97f7 --- /dev/null +++ b/protocol/aep/aep.go @@ -0,0 +1,23 @@ +// Package aep defines the AppleTalk Echo Protocol wire constants: +// statically-assigned socket, DDP type, and the request/reply command +// bytes carried in the first byte of the AEP payload. +// +// This package is wire-format only. The AEP service implementation +// (responder goroutine, router wiring) lives in service/aep. +// +// References: +// - Inside Macintosh: Networking, Chapter 3 +// https://dev.os9.ca/techpubs/mac/Networking/Networking-115.html +package aep + +const ( + // Socket is the statically-assigned AEP socket number. + Socket = 4 + // DDPType is the DDP packet type for AEP packets. + DDPType = 4 + + // CmdRequest is the AEP command byte for an echo request. + CmdRequest = 1 + // CmdReply is the AEP command byte for an echo reply. 
+ CmdReply = 2 +) diff --git a/protocol/asp/asp.go b/protocol/asp/asp.go new file mode 100644 index 0000000..91fbac8 --- /dev/null +++ b/protocol/asp/asp.go @@ -0,0 +1,283 @@ +/* +Package asp defines the AppleTalk Session Protocol (ASP) wire format: +SPFunction codes, error codes, version number, the per-message packet +types and their (un)marshallers, and ATP-derived size constants. + +ASP runs on top of ATP (TReq/TResp) and provides session-oriented +client/server communication. AFP is its primary user. + +This package is wire-format only -- no I/O, no goroutines, no state. +The ASP server, session state machine, and tickle/attention timers +live in service/asp. + +References: + - Inside AppleTalk, 2nd Edition, Chapter 11 + - Inside Macintosh: Networking, Chapter 8 +*/ +package asp + +import ( + "time" + + "github.com/pgodw/omnitalk/pkg/binutil" +) + +// --------------------------------------------------------------------------- +// SPFunction codes — first byte (MSB) of ATP UserData in every ASP packet. +// Inside AppleTalk, 2nd Edition, Chapter 11, §"SPFunction values". +// --------------------------------------------------------------------------- + +const ( + SPFuncCloseSess = 1 // workstation → server + SPFuncCommand = 2 // workstation → server + SPFuncGetStatus = 3 // workstation → server + SPFuncOpenSess = 4 // workstation → server + SPFuncTickle = 5 // both directions + SPFuncWrite = 6 // workstation → server (phase 1 of two-phase write) + SPFuncWriteContinue = 7 // server → workstation (phase 2: server requests write data) + SPFuncAttention = 8 // server → workstation +) + +// --------------------------------------------------------------------------- +// ASP protocol version number — §"Opening a session". +// The OpenSess packet carries this in the 2-byte version field. 
+// --------------------------------------------------------------------------- + +const Version uint16 = 0x0100 + +// --------------------------------------------------------------------------- +// Timer values — §"Timeouts and retry counts" / §"Maintaining the session". +// --------------------------------------------------------------------------- + +const ( + // TickleInterval is the period between keep-alive tickle packets (spec: 30 s). + TickleInterval = 30 * time.Second + + // SessionMaintenanceTimeout is the inactivity duration after which a session + // is assumed dead (spec: 2 minutes). + SessionMaintenanceTimeout = 2 * time.Minute +) + +// --------------------------------------------------------------------------- +// ASP Error Codes — Inside Macintosh: Networking, Chapter 8. +// Decimal / hex values per the spec table. +// --------------------------------------------------------------------------- + +const ( + SPErrorNoError = 0 // $00 — no error (both ends) + SPErrorBadVersNum = -1066 // $FBD6 — workstation end only + SPErrorBufTooSmall = -1067 // $FBD5 — workstation end only + SPErrorNoMoreSessions = -1068 // $FBD4 — both ends + SPErrorNoServers = -1069 // $FBD3 — workstation end only + SPErrorParamErr = -1070 // $FBD2 — both ends + SPErrorServerBusy = -1071 // $FBD1 — workstation end only + SPErrorSessClosed = -1072 // $FBD0 — both ends + SPErrorSizeErr = -1073 // $FBCF — both ends + SPErrorTooManyClients = -1074 // $FBCE — server end only + SPErrorNoAck = -1075 // $FBCD — server end only +) + +// AFP attention codes sent via SPFuncAttention. +// The attention word is a 16-bit value placed in the 2-byte ATP data payload. +// See Inside Macintosh: Files, Chapter 3 (AFP). +const ( + // AspAttnServerGoingDown signals that the AFP server is shutting down. + // Bit 15 is the "server is going down" flag defined by the AFP spec. 
+ AspAttnServerGoingDown uint16 = 0x8000 +) + +// --------------------------------------------------------------------------- +// ATP-derived size constants. +// --------------------------------------------------------------------------- + +const ( + // ATPMaxData is the maximum data payload per ATP response packet. + // DDP max data = 586 bytes; ATP header = 8 bytes → 578 bytes. + ATPMaxData = 578 + + // ATPMaxPackets is the maximum number of response packets in a single + // ATP transaction (bitmap has 8 bits). + ATPMaxPackets = 8 + + // QuantumSize is the maximum size reply block (or SPWrtContinue write data) + // on a standard AppleTalk network: 8 × 578 = 4624 bytes. + // On LocalTalk the client reports a smaller bitmap (typically 1 packet = 578). + QuantumSize = ATPMaxData * ATPMaxPackets +) + +// --------------------------------------------------------------------------- +// SPGetParms — local API call (no network packet). +// +// Before any sessions are opened, both the workstation ASP client and the +// server ASP client should interrogate ASP to identify the maximum sizes of +// commands and replies allowed by the underlying transport mechanism. +// On a standard AppleTalk network (ASP over ATP): MaxCmdSize = 578 bytes, +// QuantumSize = 4624 bytes. For transports other than ATP these may differ. +// --------------------------------------------------------------------------- + +// GetParmsResult holds the values returned by an SPGetParms call. +type GetParmsResult struct { + MaxCmdSize uint16 // maximum size of a command block (bytes) + QuantumSize uint16 // maximum size of a reply block or SPWrtContinue write data (bytes) +} + +// =================================================================== +// Packet types — one struct per SPFunction. 
+// +// UserData byte layout (MSB first, 4 bytes in ATP header): +// [0] SPFunction +// [1] SessionID (or WSSSocket for OpenSess request) +// [2:3] SeqNum / VersionNum / AttentionCode / 0 +// =================================================================== + +// OpenSessPacket represents an incoming ASP OpenSess request. +type OpenSessPacket struct { + WSSSocket uint8 // workstation session socket + VersionNum uint16 // ASP version number (expected: Version = 0x0100) +} + +// ParseOpenSessPacket extracts fields from the ATP UserData of an OpenSess TReq. +func ParseOpenSessPacket(userData uint32) OpenSessPacket { + return OpenSessPacket{ + WSSSocket: uint8((userData >> 16) & 0xFF), + VersionNum: uint16(userData & 0xFFFF), + } +} + +// OpenSessReplyPacket represents an outgoing ASP OpenSess reply. +type OpenSessReplyPacket struct { + SSSSocket uint8 // server session socket + SessionID uint8 + ErrorCode int16 // 0 = success; SPErrorBadVersNum, SPErrorServerBusy, SPErrorTooManyClients +} + +// MarshalUserData encodes the reply into the 4-byte ATP UserData field. +// +// [0] SSSSocket [1] SessionID [2:3] ErrorCode (big-endian) +func (p OpenSessReplyPacket) MarshalUserData() uint32 { + return (uint32(p.SSSSocket) << 24) | + (uint32(p.SessionID) << 16) | + uint32(uint16(p.ErrorCode)) +} + +// CloseSessPacket represents an incoming ASP CloseSess request. +type CloseSessPacket struct { + SessionID uint8 +} + +// ParseCloseSessPacket extracts fields from the ATP UserData of a CloseSess TReq. +func ParseCloseSessPacket(userData uint32) CloseSessPacket { + return CloseSessPacket{ + SessionID: uint8((userData >> 16) & 0xFF), + } +} + +// CloseSessReplyUserData returns the ATP UserData for a CloseSess reply (all zeros). +func CloseSessReplyUserData() uint32 { return 0 } + +// GetStatusPacket represents an incoming ASP GetStatus request. +// No fields beyond SPFunction; the rest of UserData is zero per spec. 
+type GetStatusPacket struct{} + +// ParseGetStatusPacket is provided for completeness; UserData is unused. +func ParseGetStatusPacket(_ uint32) GetStatusPacket { return GetStatusPacket{} } + +// CommandPacket represents an incoming ASP Command request. +type CommandPacket struct { + SessionID uint8 + SeqNum uint16 + CmdBlock []byte // AFP command block (ATP data payload) +} + +// ParseCommandPacket extracts fields from the ATP UserData and payload. +func ParseCommandPacket(userData uint32, payload []byte) CommandPacket { + return CommandPacket{ + SessionID: uint8((userData >> 16) & 0xFF), + SeqNum: uint16(userData & 0xFFFF), + CmdBlock: payload, + } +} + +// WritePacket represents an incoming ASP Write request (same layout as Command). +type WritePacket struct { + SessionID uint8 + SeqNum uint16 + CmdBlock []byte // AFP command block (e.g. FPWrite header) +} + +// ParseWritePacket extracts fields from the ATP UserData and payload. +func ParseWritePacket(userData uint32, payload []byte) WritePacket { + return WritePacket{ + SessionID: uint8((userData >> 16) & 0xFF), + SeqNum: uint16(userData & 0xFFFF), + CmdBlock: payload, + } +} + +// WriteContinuePacket represents an outgoing ASP WriteContinue request. +type WriteContinuePacket struct { + SessionID uint8 + SeqNum uint16 // same sequence number as the original Write + BufferSize uint16 // available buffer size (bytes the server wants) +} + +// MarshalUserData encodes the WriteContinue into the 4-byte ATP UserData. +// +// [0] SPFuncWriteContinue [1] SessionID [2:3] SeqNum +func (p WriteContinuePacket) MarshalUserData() uint32 { + return (uint32(SPFuncWriteContinue) << 24) | + (uint32(p.SessionID) << 16) | + uint32(p.SeqNum) +} + +// MarshalData returns the 2-byte ATP data payload (buffer size, big-endian). +func (p WriteContinuePacket) MarshalData() []byte { + b := make([]byte, p.WireSize()) + _, _ = p.MarshalWire(b) + return b +} + +// WireSize returns the fixed 2-byte size of the ATP data payload. 
+func (p WriteContinuePacket) WireSize() int { return 2 } + +// MarshalWire encodes BufferSize big-endian into b[0:2]. +func (p WriteContinuePacket) MarshalWire(b []byte) (int, error) { + return binutil.PutU16(b, p.BufferSize) +} + +// UnmarshalWire decodes BufferSize from b[0:2]. +func (p *WriteContinuePacket) UnmarshalWire(b []byte) (int, error) { + v, n, err := binutil.GetU16(b) + if err != nil { + return 0, err + } + p.BufferSize = v + return n, nil +} + +// TicklePacket represents an outgoing ASP Tickle. +type TicklePacket struct { + SessionID uint8 +} + +// MarshalUserData encodes the Tickle into the 4-byte ATP UserData. +// +// [0] SPFuncTickle [1] SessionID [2:3] 0 +func (p TicklePacket) MarshalUserData() uint32 { + return (uint32(SPFuncTickle) << 24) | (uint32(p.SessionID) << 16) +} + +// AttentionPacket represents an outgoing ASP Attention. +type AttentionPacket struct { + SessionID uint8 + AttentionCode uint16 // must be non-zero per spec +} + +// MarshalUserData encodes the Attention into the 4-byte ATP UserData. 
+// +// [0] SPFuncAttention [1] SessionID [2:3] AttentionCode +func (p AttentionPacket) MarshalUserData() uint32 { + return (uint32(SPFuncAttention) << 24) | + (uint32(p.SessionID) << 16) | + uint32(p.AttentionCode) +} diff --git a/protocol/asp/asp_wire_test.go b/protocol/asp/asp_wire_test.go new file mode 100644 index 0000000..4dfe8de --- /dev/null +++ b/protocol/asp/asp_wire_test.go @@ -0,0 +1,85 @@ +package asp + +import ( + "bytes" + "testing" +) + +func TestOpenSessReplyPacket_MarshalUserData(t *testing.T) { + t.Parallel() + p := OpenSessReplyPacket{SSSSocket: 0xAB, SessionID: 0xCD, ErrorCode: SPErrorBadVersNum} + got := p.MarshalUserData() + // SSSSocket=0xAB << 24 | SessionID=0xCD << 16 | uint16(-1066)=0xFBD6 + const want uint32 = 0xABCDFBD6 + if got != want { + t.Fatalf("MarshalUserData = %#08x, want %#08x", got, want) + } +} + +func TestParseOpenSessPacket(t *testing.T) { + t.Parallel() + got := ParseOpenSessPacket(0xAA112233) + if got.WSSSocket != 0x11 || got.VersionNum != 0x2233 { + t.Fatalf("ParseOpenSessPacket = %+v, want WSSSocket=0x11 VersionNum=0x2233", got) + } +} + +func TestParseCommandPacket(t *testing.T) { + t.Parallel() + payload := []byte{1, 2, 3} + got := ParseCommandPacket(0xAA071234, payload) + if got.SessionID != 0x07 || got.SeqNum != 0x1234 || !bytes.Equal(got.CmdBlock, payload) { + t.Fatalf("ParseCommandPacket = %+v, want SessionID=7 SeqNum=0x1234 CmdBlock=%v", got, payload) + } +} + +func TestWriteContinuePacket_WireRoundTrip(t *testing.T) { + t.Parallel() + p := WriteContinuePacket{SessionID: 0x07, SeqNum: 0x1234, BufferSize: 0xABCD} + + const wantUserData uint32 = uint32(SPFuncWriteContinue)<<24 | 0x07<<16 | 0x1234 + if got := p.MarshalUserData(); got != wantUserData { + t.Fatalf("MarshalUserData = %#08x, want %#08x", got, wantUserData) + } + + if p.WireSize() != 2 { + t.Fatalf("WireSize = %d, want 2", p.WireSize()) + } + + buf := make([]byte, p.WireSize()) + n, err := p.MarshalWire(buf) + if err != nil { + t.Fatalf("MarshalWire: 
%v", err) + } + if n != 2 || !bytes.Equal(buf, []byte{0xAB, 0xCD}) { + t.Fatalf("MarshalWire buf = % x (n=%d), want ab cd", buf, n) + } + + var out WriteContinuePacket + if _, err := out.UnmarshalWire(buf); err != nil { + t.Fatalf("UnmarshalWire: %v", err) + } + if out.BufferSize != p.BufferSize { + t.Fatalf("round-trip BufferSize = %#x, want %#x", out.BufferSize, p.BufferSize) + } +} + +func TestTicklePacket_MarshalUserData(t *testing.T) { + t.Parallel() + p := TicklePacket{SessionID: 0x42} + got := p.MarshalUserData() + const want uint32 = uint32(SPFuncTickle)<<24 | 0x42<<16 + if got != want { + t.Fatalf("MarshalUserData = %#08x, want %#08x", got, want) + } +} + +func TestAttentionPacket_MarshalUserData(t *testing.T) { + t.Parallel() + p := AttentionPacket{SessionID: 0x09, AttentionCode: AspAttnServerGoingDown} + got := p.MarshalUserData() + const want uint32 = uint32(SPFuncAttention)<<24 | 0x09<<16 | uint32(AspAttnServerGoingDown) + if got != want { + t.Fatalf("MarshalUserData = %#08x, want %#08x", got, want) + } +} diff --git a/protocol/asp/fuzz_test.go b/protocol/asp/fuzz_test.go new file mode 100644 index 0000000..6a70582 --- /dev/null +++ b/protocol/asp/fuzz_test.go @@ -0,0 +1,29 @@ +//go:build afp || all + +package asp + +import "testing" + +func FuzzParseCommandPacket(f *testing.F) { + f.Add(uint32(0), []byte{}) + f.Add(uint32(0x01000000), []byte{0x01, 0x02, 0x03}) + f.Fuzz(func(_ *testing.T, ud uint32, payload []byte) { + _ = ParseCommandPacket(ud, payload) + }) +} + +func FuzzParseWritePacket(f *testing.F) { + f.Add(uint32(0), []byte{}) + f.Add(uint32(0xDEADBEEF), []byte{0xFF, 0x00, 0x42}) + f.Fuzz(func(_ *testing.T, ud uint32, payload []byte) { + _ = ParseWritePacket(ud, payload) + }) +} + +func FuzzParseOpenSessPacket(f *testing.F) { + f.Add(uint32(0)) + f.Add(uint32(0x01000100)) + f.Fuzz(func(_ *testing.T, ud uint32) { + _ = ParseOpenSessPacket(ud) + }) +} diff --git a/service/atp/atp.go b/protocol/atp/atp.go similarity index 63% rename from 
service/atp/atp.go rename to protocol/atp/atp.go index f664415..b299d87 100644 --- a/service/atp/atp.go +++ b/protocol/atp/atp.go @@ -10,12 +10,12 @@ https://dev.os9.ca/techpubs/mac/Networking/Networking-143.html#HEADING143-0 package atp import ( - "encoding/binary" "errors" "fmt" "time" - "github.com/pgodw/omnitalk/go/appletalk" + "github.com/pgodw/omnitalk/pkg/binutil" + "github.com/pgodw/omnitalk/protocol" ) // ATP Control bit masks. @@ -41,16 +41,16 @@ const ( ) // FuncCode returns the function code (TReq, TResp, or TRel) from the header. -func (h *ATPHeader) FuncCode() FuncCode { return FuncCode(h.Control & FuncMask) } +func (h *Header) FuncCode() FuncCode { return FuncCode(h.Control & FuncMask) } // XO returns true if the XO bit is set. -func (h *ATPHeader) XO() bool { return h.Control&XO != 0 } +func (h *Header) XO() bool { return h.Control&XO != 0 } // EOM returns true if the EOM bit is set. -func (h *ATPHeader) EOM() bool { return h.Control&EOM != 0 } +func (h *Header) EOM() bool { return h.Control&EOM != 0 } // STS returns true if the STS bit is set. -func (h *ATPHeader) STS() bool { return h.Control&STS != 0 } +func (h *Header) STS() bool { return h.Control&STS != 0 } // TRelTimeout encodes the 3-bit TRel timeout indicator carried in the low // bits of the control byte for XO TReq packets. @@ -83,12 +83,12 @@ func (t TRelTimeout) Duration() time.Duration { } // GetTRelTimeout extracts the TRel timeout indicator from the control byte. -func (h *ATPHeader) GetTRelTimeout() TRelTimeout { +func (h *Header) GetTRelTimeout() TRelTimeout { return TRelTimeout(h.Control & 0x07) } // SetTRelTimeout encodes the TRel timeout indicator into the control byte. -func (h *ATPHeader) SetTRelTimeout(t TRelTimeout) { +func (h *Header) SetTRelTimeout(t TRelTimeout) { h.Control = (h.Control &^ 0x07) | (uint8(t) & 0x07) } @@ -101,10 +101,10 @@ const ( MaxATPData = 578 ) -// DDPTypeATP is the DDP type for ATP packets. 
-const DDPTypeATP = 3 +// DDPType is the DDP type for ATP packets. +const DDPType = 3 -// ATPHeader represents an ATP packet header. +// Header represents an ATP packet header. // Refer: https://dev.os9.ca/techpubs/mac/Networking/Networking-145.html#HEADING145-0 // // 0 1 2 3 @@ -114,40 +114,62 @@ const DDPTypeATP = 3 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // | User Data | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -type ATPHeader struct { +type Header struct { Control uint8 Bitmap uint8 // Sequence number for TRESP, bitmap for TREQ TransID uint16 UserData uint32 } -// ATPHeaderSize is the size of an ATP header in bytes. -const ATPHeaderSize = 8 +// HeaderSize is the size of an ATP header in bytes. +const HeaderSize = 8 -// Marshal binary-encodes the ATP header. -func (h *ATPHeader) Marshal() []byte { - b := make([]byte, ATPHeaderSize) +// WireSize returns the fixed 8-byte ATP header size. +func (h *Header) WireSize() int { return HeaderSize } + +// MarshalWire encodes the header into b. Returns ErrShortBuffer if +// len(b) < HeaderSize. +func (h *Header) MarshalWire(b []byte) (int, error) { + if len(b) < HeaderSize { + return 0, binutil.ErrShortBuffer + } b[0] = h.Control b[1] = h.Bitmap - binary.BigEndian.PutUint16(b[2:4], h.TransID) - binary.BigEndian.PutUint32(b[4:8], h.UserData) + _, _ = binutil.PutU16(b[2:], h.TransID) + _, _ = binutil.PutU32(b[4:], h.UserData) + return HeaderSize, nil +} + +// UnmarshalWire decodes the header from b. +func (h *Header) UnmarshalWire(b []byte) (int, error) { + if len(b) < HeaderSize { + return 0, binutil.ErrShortBuffer + } + h.Control = b[0] + h.Bitmap = b[1] + h.TransID, _, _ = binutil.GetU16(b[2:]) + h.UserData, _, _ = binutil.GetU32(b[4:]) + return HeaderSize, nil +} + +// Marshal binary-encodes the ATP header. Allocates; prefer MarshalWire. 
+func (h *Header) Marshal() []byte { + b := make([]byte, HeaderSize) + _, _ = h.MarshalWire(b) return b } // Unmarshal binary-decodes the ATP header. -func (h *ATPHeader) Unmarshal(b []byte) error { - if len(b) < ATPHeaderSize { +func (h *Header) Unmarshal(b []byte) error { + _, err := h.UnmarshalWire(b) + if err == binutil.ErrShortBuffer { return errors.New("packet too short for ATP header") } - h.Control = b[0] - h.Bitmap = b[1] - h.TransID = binary.BigEndian.Uint16(b[2:4]) - h.UserData = binary.BigEndian.Uint32(b[4:8]) - return nil + return err } -func (h *ATPHeader) String() string { - return fmt.Sprintf("ATPHeader{Control:0x%02x Bitmap:0x%02x TransID:%d UserData:0x%08x}", h.Control, h.Bitmap, h.TransID, h.UserData) +func (h *Header) String() string { + return fmt.Sprintf("Header{Control:0x%02x Bitmap:0x%02x TransID:%d UserData:0x%08x}", h.Control, h.Bitmap, h.TransID, h.UserData) } -var _ appletalk.Packet = (*ATPHeader)(nil) +var _ protocol.Packet = (*Header)(nil) diff --git a/protocol/atp/atp_wire_test.go b/protocol/atp/atp_wire_test.go new file mode 100644 index 0000000..5cb21b2 --- /dev/null +++ b/protocol/atp/atp_wire_test.go @@ -0,0 +1,48 @@ +package atp + +import ( + "bytes" + "testing" +) + +func TestATPHeaderWireGolden(t *testing.T) { + t.Parallel() + h := Header{ + Control: 0x40, + Bitmap: 0xFF, + TransID: 0x1234, + UserData: 0xDEADBEEF, + } + want := []byte{0x40, 0xFF, 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF} + + buf := make([]byte, h.WireSize()) + n, err := h.MarshalWire(buf) + if err != nil { + t.Fatalf("MarshalWire: %v", err) + } + if n != HeaderSize { + t.Fatalf("n = %d, want %d", n, HeaderSize) + } + if !bytes.Equal(buf, want) { + t.Fatalf("MarshalWire = % x, want % x", buf, want) + } + + var out Header + if _, err := out.UnmarshalWire(buf); err != nil { + t.Fatalf("UnmarshalWire: %v", err) + } + if out != h { + t.Fatalf("round-trip mismatch: got %+v, want %+v", out, h) + } +} + +func TestATPHeaderShortBuffer(t *testing.T) { + t.Parallel() + h := 
Header{} + if _, err := h.MarshalWire(make([]byte, 7)); err == nil { + t.Fatal("expected ErrShortBuffer on short marshal") + } + if _, err := h.UnmarshalWire(make([]byte, 7)); err == nil { + t.Fatal("expected ErrShortBuffer on short unmarshal") + } +} diff --git a/protocol/atp/bench_test.go b/protocol/atp/bench_test.go new file mode 100644 index 0000000..cba88f9 --- /dev/null +++ b/protocol/atp/bench_test.go @@ -0,0 +1,32 @@ +package atp + +import "testing" + +func BenchmarkHeaderMarshalWire(b *testing.B) { + h := Header{Control: 0x40, Bitmap: 0xFF, TransID: 0x1234, UserData: 0xDEADBEEF} + buf := make([]byte, HeaderSize) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = h.MarshalWire(buf) + } +} + +func BenchmarkHeaderUnmarshalWire(b *testing.B) { + src := []byte{0x40, 0xFF, 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF} + var h Header + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = h.UnmarshalWire(src) + } +} + +func BenchmarkHeaderRoundTrip(b *testing.B) { + h := Header{Control: 0x40, Bitmap: 0xFF, TransID: 0x1234, UserData: 0xDEADBEEF} + buf := make([]byte, HeaderSize) + var out Header + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = h.MarshalWire(buf) + _, _ = out.UnmarshalWire(buf) + } +} diff --git a/protocol/atp/doc.go b/protocol/atp/doc.go new file mode 100644 index 0000000..7caa260 --- /dev/null +++ b/protocol/atp/doc.go @@ -0,0 +1,14 @@ +// Package atp defines the AppleTalk Transaction Protocol wire format: +// header layout, control-bit constants, function codes, the TRel timeout +// indicator, and Marshal/Unmarshal helpers via pkg/binutil. +// +// This package is wire-format only — no I/O, no goroutines, no state. +// The transaction state machine (Endpoint, TCB/RspCB, retry/release +// timers) lives in service/atp. 
+// +// References: +// - Inside Macintosh: Networking, Chapter 6 +// https://dev.os9.ca/techpubs/mac/Networking/Networking-143.html +// - ATP packet format +// https://dev.os9.ca/techpubs/mac/Networking/Networking-145.html +package atp diff --git a/protocol/atp/fuzz_test.go b/protocol/atp/fuzz_test.go new file mode 100644 index 0000000..86d4cfb --- /dev/null +++ b/protocol/atp/fuzz_test.go @@ -0,0 +1,12 @@ +package atp + +import "testing" + +func FuzzATPHeaderUnmarshal(f *testing.F) { + f.Add(make([]byte, 8)) + f.Add(make([]byte, 32)) + f.Fuzz(func(t *testing.T, data []byte) { + var h Header + _, _ = h.UnmarshalWire(data) + }) +} diff --git a/protocol/ddp/doc.go b/protocol/ddp/doc.go new file mode 100644 index 0000000..6bd1832 --- /dev/null +++ b/protocol/ddp/doc.go @@ -0,0 +1,18 @@ +/* +Package ddp defines the Datagram Delivery Protocol (DDP) wire format: +the long-header datagram struct, its marshal/unmarshal helpers, the +checksum algorithm, and the protocol's data-length cap. + +DDP is the AppleTalk network-layer datagram protocol — every higher-level +AppleTalk protocol (ATP, ASP, AEP, RTMP, ZIP, NBP, AFP-over-ASP) is +encapsulated in DDP datagrams and routed by destination network/node. + +This package is wire-format only — no I/O, no goroutines, no state. +Routing, port abstraction, and packet dispatch live elsewhere +(router/, port/, service/*). + +References: + - Inside AppleTalk, 2nd Edition, Chapter 4 + - Inside Macintosh: Networking, Chapter 1 +*/ +package ddp diff --git a/protocol/ddp/fuzz_test.go b/protocol/ddp/fuzz_test.go new file mode 100644 index 0000000..7868620 --- /dev/null +++ b/protocol/ddp/fuzz_test.go @@ -0,0 +1,23 @@ +package ddp + +import "testing" + +func FuzzDatagramFromLongHeaderBytes(f *testing.F) { + // Seed with a minimum-valid DDP long header (13 bytes, no payload). 
+ f.Add(make([]byte, 13)) + f.Add(make([]byte, 64)) + f.Fuzz(func(t *testing.T, data []byte) { + // Decoder must never panic on arbitrary input — including + // truncated headers, oversized lengths, or bad checksums. + _, _ = DatagramFromLongHeaderBytes(data, false) + _, _ = DatagramFromLongHeaderBytes(data, true) + }) +} + +func FuzzDatagramFromShortHeaderBytes(f *testing.F) { + f.Add(uint8(0), uint8(0), make([]byte, 5)) + f.Add(uint8(1), uint8(2), make([]byte, 32)) + f.Fuzz(func(t *testing.T, dst, src uint8, data []byte) { + _, _ = DatagramFromShortHeaderBytes(dst, src, data) + }) +} diff --git a/protocol/llap/fuzz_test.go b/protocol/llap/fuzz_test.go new file mode 100644 index 0000000..37fe3e7 --- /dev/null +++ b/protocol/llap/fuzz_test.go @@ -0,0 +1,16 @@ +package llap + +import "testing" + +func FuzzFrameFromBytes(f *testing.F) { + f.Add(make([]byte, 3)) + f.Add(make([]byte, 64)) + f.Fuzz(func(t *testing.T, data []byte) { + fr, err := FrameFromBytes(data) + if err != nil { + return + } + _ = fr.Validate() + _ = fr.Bytes() + }) +} diff --git a/protocol/llap/llap.go b/protocol/llap/llap.go new file mode 100644 index 0000000..9cfe552 --- /dev/null +++ b/protocol/llap/llap.go @@ -0,0 +1,96 @@ +// Package llap defines the LocalTalk Link Access Protocol wire format +// (frame layout, control/data type codes, validation). It contains no +// I/O or state-machine logic — see service/llap for the access-control +// state machine and port/localtalk for the link-layer transports that +// carry LLAP frames over UDP, TashTalk, or virtual cables. +// +// Reference: spec/06-llap.md and Inside AppleTalk, 2nd ed., chapter 1. +package llap + +import "fmt" + +// Control- and data-type codes carried in the third byte of an LLAP +// frame. Data types (< 0x80) carry an AppleTalk DDP header; control +// types (>= 0x80) participate in the access-control handshake. 
+const ( + TypeAppleTalkShortHeader = 0x01 + TypeAppleTalkLongHeader = 0x02 + TypeENQ = 0x81 + TypeACK = 0x82 + TypeRTS = 0x84 + TypeCTS = 0x85 +) + +// BroadcastNode is the LLAP destination address that selects every node +// on the LocalTalk segment. +const BroadcastNode = 0xFF + +// MaxDataSize is the largest payload an LLAP data frame may carry. +const MaxDataSize = 600 + +// Frame is the wire form of an LLAP frame: destination, source, type, +// and an optional payload (data frames only). The 2-byte trailing FCS +// that appears on the cable is handled by the link layer and is not +// represented here. +type Frame struct { + DestinationNode uint8 + SourceNode uint8 + Type uint8 + Payload []byte +} + +// FrameFromBytes parses a wire-form LLAP frame. The returned Frame's +// Payload is a copy and does not alias b. +func FrameFromBytes(b []byte) (Frame, error) { + if len(b) < 3 { + return Frame{}, fmt.Errorf("LLAP frame too short: %d", len(b)) + } + f := Frame{ + DestinationNode: b[0], + SourceNode: b[1], + Type: b[2], + Payload: append([]byte(nil), b[3:]...), + } + if err := f.Validate(); err != nil { + return Frame{}, err + } + return f, nil +} + +// Validate reports whether f is a well-formed LLAP frame. +func (f Frame) Validate() error { + if f.IsControl() { + if len(f.Payload) != 0 { + return fmt.Errorf("LLAP control frame 0x%02X has payload length %d", f.Type, len(f.Payload)) + } + switch f.Type { + case TypeENQ, TypeACK, TypeRTS, TypeCTS: + return nil + default: + return fmt.Errorf("invalid LLAP control type 0x%02X", f.Type) + } + } + if !f.IsData() { + return fmt.Errorf("invalid LLAP frame type 0x%02X", f.Type) + } + if len(f.Payload) > MaxDataSize { + return fmt.Errorf("LLAP payload too large: %d", len(f.Payload)) + } + return nil +} + +// IsControl reports whether f is a link-control frame (ENQ/ACK/RTS/CTS). +func (f Frame) IsControl() bool { return f.Type >= 0x80 } + +// IsData reports whether f carries an AppleTalk DDP datagram. 
+func (f Frame) IsData() bool { + return f.Type == TypeAppleTalkShortHeader || f.Type == TypeAppleTalkLongHeader +} + +// Bytes returns the wire encoding of f. +func (f Frame) Bytes() []byte { + out := make([]byte, 0, 3+len(f.Payload)) + out = append(out, f.DestinationNode, f.SourceNode, f.Type) + out = append(out, f.Payload...) + return out +} diff --git a/protocol/llap/llap_test.go b/protocol/llap/llap_test.go new file mode 100644 index 0000000..cb7b76c --- /dev/null +++ b/protocol/llap/llap_test.go @@ -0,0 +1,55 @@ +package llap + +import ( + "bytes" + "testing" +) + +func TestFrameRoundTrip(t *testing.T) { + t.Parallel() + cases := []struct { + name string + f Frame + }{ + {"data short header", Frame{DestinationNode: 1, SourceNode: 2, Type: TypeAppleTalkShortHeader, Payload: []byte{0xDE, 0xAD}}}, + {"data long header", Frame{DestinationNode: 0xFF, SourceNode: 0x42, Type: TypeAppleTalkLongHeader, Payload: bytes.Repeat([]byte{0x55}, 64)}}, + {"control ENQ", Frame{DestinationNode: 0xFE, SourceNode: 0xFE, Type: TypeENQ}}, + {"control CTS", Frame{DestinationNode: 0x10, SourceNode: 0x20, Type: TypeCTS}}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + b := tc.f.Bytes() + got, err := FrameFromBytes(b) + if err != nil { + t.Fatalf("FrameFromBytes: %v", err) + } + if got.DestinationNode != tc.f.DestinationNode || got.SourceNode != tc.f.SourceNode || got.Type != tc.f.Type { + t.Fatalf("header mismatch: got %+v want %+v", got, tc.f) + } + if !bytes.Equal(got.Payload, tc.f.Payload) { + t.Fatalf("payload mismatch: got %x want %x", got.Payload, tc.f.Payload) + } + }) + } +} + +func TestFrameValidate(t *testing.T) { + t.Parallel() + if err := (Frame{Type: TypeENQ, Payload: []byte{0x00}}).Validate(); err == nil { + t.Fatal("control frame with payload should fail validation") + } + if err := (Frame{Type: 0x77}).Validate(); err == nil { + t.Fatal("unknown frame type should fail validation") + } + if err := (Frame{Type: 
TypeAppleTalkShortHeader, Payload: bytes.Repeat([]byte{0}, MaxDataSize+1)}).Validate(); err == nil { + t.Fatal("oversize payload should fail validation") + } +} + +func TestFrameFromBytesShort(t *testing.T) { + t.Parallel() + if _, err := FrameFromBytes([]byte{0x01, 0x02}); err == nil { + t.Fatal("expected error for too-short frame") + } +} diff --git a/protocol/nbp/fuzz_test.go b/protocol/nbp/fuzz_test.go new file mode 100644 index 0000000..430c400 --- /dev/null +++ b/protocol/nbp/fuzz_test.go @@ -0,0 +1,18 @@ +package nbp + +import "testing" + +func FuzzParsePacket(f *testing.F) { + f.Add(make([]byte, 8)) + // Seed with a minimal valid LkUp tuple (Foo:Bar@*). + f.Add([]byte{ + (CtrlLkUp << 4) | 1, + 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, + 3, 'F', 'o', 'o', + 3, 'B', 'a', 'r', + 1, '*', + }) + f.Fuzz(func(t *testing.T, data []byte) { + _, _ = ParsePacket(data) + }) +} diff --git a/protocol/nbp/nbp.go b/protocol/nbp/nbp.go new file mode 100644 index 0000000..49f2178 --- /dev/null +++ b/protocol/nbp/nbp.go @@ -0,0 +1,146 @@ +// Package nbp defines the AppleTalk Name Binding Protocol wire format +// (function codes, tuple layout, packet parser/builder, and the small +// matching primitives used by lookup). It contains no I/O or service +// state — see service/zip.NameInformationService for the registry and +// routing logic that uses these types. +// +// Reference: spec/04-nbp.md and Inside AppleTalk, 2nd ed., chapter 7. +package nbp + +import ( + "bytes" + "errors" +) + +// Well-known DDP socket and DDP type for NBP traffic. +const ( + SASSocket = 2 + DDPType = 2 +) + +// NBP control function codes carried in the high nibble of the first +// byte of an NBP packet. The low nibble carries the tuple count. +const ( + CtrlBrRq = 1 // Broadcast request + CtrlLkUp = 2 // Lookup + CtrlLkUpRply = 3 // Lookup reply + CtrlFwd = 4 // Forward request +) + +// Wildcards used in BrRq / LkUp lookups. 
+const ( + NameWildcard = '=' + ZoneWildcard = '*' +) + +// ErrMalformed is returned when an inbound packet cannot be decoded. +var ErrMalformed = errors.New("nbp: malformed packet") + +// Tuple is a single NBP tuple: an address (network/node/socket), an +// enumerator, and an entity name (object:type@zone). Inbound packets +// carry exactly one tuple in OmniTalk's NBP handler; LkUp-Rply may +// pack several but the registered service emits one per match. +type Tuple struct { + Network uint16 + Node uint8 + Socket uint8 + Enumerator uint8 + Object []byte + Type []byte + Zone []byte +} + +// Packet is a parsed NBP packet header plus the embedded tuple. +type Packet struct { + Function uint8 // CtrlBrRq, CtrlLkUp, CtrlLkUpRply, CtrlFwd + TupleCount uint8 + NBPID uint8 + Tuple Tuple +} + +// ParsePacket decodes the single-tuple form of an NBP packet from a DDP +// payload. It returns ErrMalformed if the layout is invalid or the +// declared lengths run past the buffer. +// +// On-wire layout: +// +// 0 1 2..3 4 5 6 7 +// +-------+------------+----------+----+----+----+ +// |fn|cnt | NBPID | network |node|sock|enum| +// +-------+------------+----------+----+----+----+ +// | obj | objBytes | typ | typBytes ... | zone | zoneBytes | +// +// Trailing zone-length zero is treated as the zone wildcard "*". 
+func ParsePacket(data []byte) (Packet, error) { + if len(data) < 8 { + return Packet{}, ErrMalformed + } + funcTupleCount := data[0] + pkt := Packet{ + Function: funcTupleCount >> 4, + TupleCount: funcTupleCount & 0x0F, + NBPID: data[1], + } + objLen := int(data[7]) + if objLen < 1 || len(data) < 8+objLen+1 { + return Packet{}, ErrMalformed + } + typLen := int(data[8+objLen]) + if typLen < 1 || len(data) < 9+objLen+typLen+1 { + return Packet{}, ErrMalformed + } + zoneLen := int(data[9+objLen+typLen]) + if len(data) < 10+objLen+typLen+zoneLen { + return Packet{}, ErrMalformed + } + pkt.Tuple = Tuple{ + Network: uint16(data[2])<<8 | uint16(data[3]), + Node: data[4], + Socket: data[5], + Enumerator: data[6], + Object: data[8 : 8+objLen], + Type: data[9+objLen : 9+objLen+typLen], + Zone: data[10+objLen+typLen : 10+objLen+typLen+zoneLen], + } + if len(pkt.Tuple.Zone) == 0 { + pkt.Tuple.Zone = []byte{ZoneWildcard} + } + return pkt, nil +} + +// BuildLkUpRply encodes a single-tuple LkUp-Rply packet. The returned +// slice is freshly allocated. +func BuildLkUpRply(nbpID byte, network uint16, node, socket uint8, obj, typ, zone []byte) []byte { + out := make([]byte, 0, 12+len(obj)+len(typ)+len(zone)) + out = append(out, (CtrlLkUpRply<<4)|1) + out = append(out, nbpID) + out = append(out, byte(network>>8), byte(network)) + out = append(out, node) + out = append(out, socket) + out = append(out, 0) // enumerator + out = append(out, byte(len(obj))) + out = append(out, obj...) + out = append(out, byte(len(typ))) + out = append(out, typ...) + out = append(out, byte(len(zone))) + out = append(out, zone...) + return out +} + +// NameMatch reports whether the given pattern matches the registered +// name. NBP uses '=' as the wildcard for object and type fields. 
+func NameMatch(pattern, name []byte) bool { + if len(pattern) == 1 && pattern[0] == NameWildcard { + return true + } + return bytes.EqualFold(pattern, name) +} + +// ZoneMatch reports whether the given pattern matches the registered +// zone. NBP uses '*' as the zone wildcard. +func ZoneMatch(pattern, zone []byte) bool { + if len(pattern) == 1 && pattern[0] == ZoneWildcard { + return true + } + return bytes.EqualFold(pattern, zone) +} diff --git a/protocol/nbp/nbp_test.go b/protocol/nbp/nbp_test.go new file mode 100644 index 0000000..b6bb8ce --- /dev/null +++ b/protocol/nbp/nbp_test.go @@ -0,0 +1,113 @@ +package nbp + +import ( + "bytes" + "testing" +) + +func TestParsePacketLkUp(t *testing.T) { + t.Parallel() + // LkUp for "Foo:AFPServer@Eng" with reply addr 1.2.3.42 sock 4 enum 5 + obj, typ, zone := []byte("Foo"), []byte("AFPServer"), []byte("Eng") + data := []byte{ + (CtrlLkUp << 4) | 1, // function | tuple count + 0x77, // NBPID + 0x00, 0x01, // network 1 + 0x02, // node + 0x03, // socket + 0x04, // enumerator + byte(len(obj)), + } + data = append(data, obj...) + data = append(data, byte(len(typ))) + data = append(data, typ...) + data = append(data, byte(len(zone))) + data = append(data, zone...) + + pkt, err := ParsePacket(data) + if err != nil { + t.Fatalf("ParsePacket: %v", err) + } + if pkt.Function != CtrlLkUp || pkt.TupleCount != 1 || pkt.NBPID != 0x77 { + t.Fatalf("header mismatch: %+v", pkt) + } + if pkt.Tuple.Network != 1 || pkt.Tuple.Node != 2 || pkt.Tuple.Socket != 3 || pkt.Tuple.Enumerator != 4 { + t.Fatalf("tuple addr mismatch: %+v", pkt.Tuple) + } + if !bytes.Equal(pkt.Tuple.Object, obj) || !bytes.Equal(pkt.Tuple.Type, typ) || !bytes.Equal(pkt.Tuple.Zone, zone) { + t.Fatalf("tuple name mismatch: %+v", pkt.Tuple) + } +} + +func TestParsePacketEmptyZoneBecomesWildcard(t *testing.T) { + t.Parallel() + obj, typ := []byte("X"), []byte("Y") + data := []byte{(CtrlBrRq << 4) | 1, 0, 0, 0, 0, 0, 0, byte(len(obj))} + data = append(data, obj...) 
+ data = append(data, byte(len(typ))) + data = append(data, typ...) + data = append(data, 0) // zoneLen = 0 + pkt, err := ParsePacket(data) + if err != nil { + t.Fatalf("ParsePacket: %v", err) + } + if string(pkt.Tuple.Zone) != "*" { + t.Fatalf("expected zone wildcard, got %q", pkt.Tuple.Zone) + } +} + +func TestParsePacketMalformed(t *testing.T) { + t.Parallel() + cases := [][]byte{ + nil, + {0x10, 0, 0, 0, 0, 0, 0}, // <8 bytes + {(CtrlLkUp << 4) | 1, 0, 0, 0, 0, 0, 0, 0}, // objLen=0 + } + for i, c := range cases { + if _, err := ParsePacket(c); err == nil { + t.Fatalf("case %d: expected error", i) + } + } +} + +func TestBuildLkUpRplyRoundTrip(t *testing.T) { + t.Parallel() + obj, typ, zone := []byte("Server"), []byte("AFPServer"), []byte("Mktg") + out := BuildLkUpRply(0x42, 0x1234, 0x55, 0x66, obj, typ, zone) + pkt, err := ParsePacket(out) + if err != nil { + t.Fatalf("ParsePacket: %v", err) + } + if pkt.Function != CtrlLkUpRply || pkt.NBPID != 0x42 { + t.Fatalf("header: %+v", pkt) + } + if pkt.Tuple.Network != 0x1234 || pkt.Tuple.Node != 0x55 || pkt.Tuple.Socket != 0x66 { + t.Fatalf("addr: %+v", pkt.Tuple) + } + if !bytes.Equal(pkt.Tuple.Object, obj) || !bytes.Equal(pkt.Tuple.Type, typ) || !bytes.Equal(pkt.Tuple.Zone, zone) { + t.Fatalf("name: %+v", pkt.Tuple) + } +} + +func TestNameMatch(t *testing.T) { + t.Parallel() + if !NameMatch([]byte{NameWildcard}, []byte("anything")) { + t.Fatal("= should match anything") + } + if !NameMatch([]byte("Foo"), []byte("foo")) { + t.Fatal("name match should be case-insensitive") + } + if NameMatch([]byte("Foo"), []byte("Bar")) { + t.Fatal("name mismatch should fail") + } +} + +func TestZoneMatch(t *testing.T) { + t.Parallel() + if !ZoneMatch([]byte{ZoneWildcard}, []byte("anything")) { + t.Fatal("* should match anything") + } + if !ZoneMatch([]byte("Eng"), []byte("eng")) { + t.Fatal("zone match should be case-insensitive") + } +} diff --git a/protocol/protocol.go b/protocol/protocol.go new file mode 100644 index 
0000000..8acaf73 --- /dev/null +++ b/protocol/protocol.go @@ -0,0 +1,14 @@ +// Package protocol defines cross-protocol contracts used by OmniTalk's wire +// implementations (DDP, ATP, ASP, ZIP, RTMP, AEP, LLAP, NBP). Each protocol +// lives in its own subpackage; this package carries only interfaces common to +// all of them. +package protocol + +// Packet is the contract implemented by any AppleTalk protocol header or +// datagram that supports binary wire encoding/decoding and structured log +// formatting. +type Packet interface { + String() string + Marshal() []byte + Unmarshal(data []byte) error +} diff --git a/protocol/rtmp/rtmp.go b/protocol/rtmp/rtmp.go new file mode 100644 index 0000000..c4302c1 --- /dev/null +++ b/protocol/rtmp/rtmp.go @@ -0,0 +1,32 @@ +// Package rtmp defines the Routing Table Maintenance Protocol wire +// constants: statically-assigned socket, DDP types for data and request +// packets, RTMP version byte, function codes, and the special distance +// value used to advertise an unreachable network. +// +// This package is wire-format only. The RTMP responding/sending state +// machines and routing-table aging live in service/rtmp. +// +// References: +// - Inside Macintosh: Networking, Chapter 5 +// https://dev.os9.ca/techpubs/mac/Networking/Networking-129.html +package rtmp + +const ( + // SAS is the statically-assigned RTMP socket. + SAS = 1 + // DDPTypeData is the DDP type for RTMP Data packets (routing tuples). + DDPTypeData = 1 + // DDPTypeRequest is the DDP type for RTMP Request packets. + DDPTypeRequest = 5 + // Version is the RTMP version byte present in tuple packets. + Version = 0x82 + + // Function codes inside Request packets. + FuncRequest = 1 + FuncRDRSplitHorizon = 2 + FuncRDRNoSplitHorizon = 3 + + // NotifyNeighborDistance is the distance value used to advertise that + // a route has gone bad (Notify Neighbor). 
+ NotifyNeighborDistance = 31 +) diff --git a/protocol/zip/zip.go b/protocol/zip/zip.go new file mode 100644 index 0000000..8e21149 --- /dev/null +++ b/protocol/zip/zip.go @@ -0,0 +1,40 @@ +// Package zip defines the Zone Information Protocol wire constants: +// DDP type, statically-assigned socket, function codes (Query/Reply/ +// GetNetInfo/ExtReply), GetNetInfo flag bits, and the ATP-carried ZIP +// function codes used in TReq UserBytes. +// +// This package is wire-format only. The ZIP responding/sending state +// machines live in service/zip. +// +// References: +// - Inside Macintosh: Networking, Chapter 8 +// https://dev.os9.ca/techpubs/mac/Networking/Networking-167.html +package zip + +const ( + // SAS is the statically-assigned ZIP socket. + SAS = 6 + // DDPType is the DDP packet type for ZIP messages. + DDPType = 6 + + // ZIP function codes (in the first data byte of a ZIP-over-DDP packet). + FuncQuery = 1 + FuncReply = 2 + FuncGetNetInfoReq = 5 + FuncGetNetInfoRep = 6 + FuncExtReply = 8 + + // GetNetInfo flag bits. + GetNetInfoZoneInvalid = 0x80 + GetNetInfoUseBroadcast = 0x40 + GetNetInfoOnlyOneZone = 0x20 + + // ATP-carried ZIP function codes (in TReq UserBytes high byte). + ATPDDPType = 3 + ATPFuncTReq = 0x40 + ATPFuncTResp = 0x80 + ATPEOM = 0x10 + ATPGetMyZone = 7 + ATPGetZoneList = 8 + ATPGetLocalZoneList = 9 +) diff --git a/router/doc.go b/router/doc.go new file mode 100644 index 0000000..b1e15f0 --- /dev/null +++ b/router/doc.go @@ -0,0 +1,10 @@ +// Package router implements the OmniTalk AppleTalk Phase 2 router core. +// +// The router maintains the routing table (RTMP) and zone information +// table (ZIP), receives DDP datagrams from every registered Port, and +// dispatches them to local Services by socket number or forwards them +// to other ports. +// +// See spec/00-overview.md for socket assignments and the contracts the +// router expects from Service and Port implementations. 
+package router diff --git a/router/router.go b/router/router.go index baa0c99..bf6da8e 100644 --- a/router/router.go +++ b/router/router.go @@ -1,19 +1,24 @@ package router import ( + "context" "errors" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/port/localtalk" - "github.com/pgodw/omnitalk/go/service" - "github.com/pgodw/omnitalk/go/service/aep" - "github.com/pgodw/omnitalk/go/service/llap" - "github.com/pgodw/omnitalk/go/service/rtmp" - "github.com/pgodw/omnitalk/go/service/zip" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/telemetry" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/port/localtalk" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/aep" + "github.com/pgodw/omnitalk/service/llap" + "github.com/pgodw/omnitalk/service/rtmp" + "github.com/pgodw/omnitalk/service/zip" ) +var framesInTotal = telemetry.NewCounter("omnitalk_router_frames_in_total") + type Router struct { shortStr string Ports []port.Port @@ -21,12 +26,12 @@ type Router struct { servicesBySAS map[uint8]service.Service RoutingTable *RoutingTable ZoneInformationTable *ZoneInformationTable - observer func(appletalk.Datagram, port.Port) + observer func(ddp.Datagram, port.Port) } // SetObserver installs a callback that is invoked for every datagram delivered // locally (after DDP decoding, before service dispatch). Pass nil to remove. 
-func (r *Router) SetObserver(fn func(appletalk.Datagram, port.Port)) { +func (r *Router) SetObserver(fn func(ddp.Datagram, port.Port)) { r.observer = fn } @@ -95,19 +100,19 @@ func (r *Router) bindLLAPManager() { } } -func (r *Router) deliver(datagram appletalk.Datagram, rxPort port.Port) { +func (r *Router) deliver(datagram ddp.Datagram, rxPort port.Port) { if svc, ok := r.servicesBySAS[datagram.DestinationSocket]; ok { svc.Inbound(datagram, rxPort) } } -func (r *Router) Start() error { +func (r *Router) Start(ctx context.Context) error { for _, s := range r.Services { if _, ok := s.(*llap.Service); !ok { continue } netlog.Info("starting %T...", s) - if err := s.Start(r); err != nil { + if err := s.Start(ctx, r); err != nil { return err } } @@ -123,7 +128,7 @@ func (r *Router) Start() error { continue } netlog.Info("starting %T...", s) - if err := s.Start(r); err != nil { + if err := s.Start(ctx, r); err != nil { return err } } @@ -153,7 +158,8 @@ func (r *Router) Stop() error { return nil } -func (r *Router) Inbound(datagram appletalk.Datagram, rxPort port.Port) { +func (r *Router) Inbound(datagram ddp.Datagram, rxPort port.Port) { + framesInTotal.Inc() if rxPort.Network() != 0 { if datagram.DestinationNetwork == 0 && datagram.SourceNetwork == 0 { datagram.DestinationNetwork = rxPort.Network() @@ -188,7 +194,7 @@ func (r *Router) Inbound(datagram appletalk.Datagram, rxPort port.Port) { _ = r.Route(datagram, false) } -func (r *Router) Route(datagram appletalk.Datagram, originating bool) error { +func (r *Router) Route(datagram ddp.Datagram, originating bool) error { if originating { if datagram.HopCount != 0 { return errors.New("originated datagrams must have hop count of 0") @@ -239,13 +245,13 @@ func (r *Router) Route(datagram appletalk.Datagram, originating bool) error { return nil } -func (r *Router) Reply(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) { +func (r *Router) Reply(datagram ddp.Datagram, rxPort port.Port, ddpType 
uint8, data []byte) { if datagram.SourceNode == 0 || datagram.SourceNode == 0xFF { return } if rxPort.Node() != 0 && (datagram.SourceNetwork == 0 || (datagram.SourceNetwork >= 0xFF00 && datagram.SourceNetwork <= 0xFFFE) || datagram.SourceNetwork < rxPort.NetworkMin() || datagram.SourceNetwork > rxPort.NetworkMax()) { - rxPort.Broadcast(appletalk.Datagram{ + rxPort.Broadcast(ddp.Datagram{ HopCount: 0, DestinationNetwork: 0, SourceNetwork: rxPort.Network(), @@ -258,7 +264,7 @@ func (r *Router) Reply(datagram appletalk.Datagram, rxPort port.Port, ddpType ui }) return } - _ = r.Route(appletalk.Datagram{ + _ = r.Route(ddp.Datagram{ HopCount: 0, DestinationNetwork: datagram.SourceNetwork, SourceNetwork: datagram.DestinationNetwork, // reply FROM the address the client sent TO diff --git a/router/routing_table.go b/router/routing_table.go index 5f35f18..1e30e1e 100644 --- a/router/routing_table.go +++ b/router/routing_table.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" ) type RoutingTableEntry struct { diff --git a/router/zone_information_table.go b/router/zone_information_table.go index 3899327..5289105 100644 --- a/router/zone_information_table.go +++ b/router/zone_information_table.go @@ -5,11 +5,11 @@ import ( "fmt" "sync" - "github.com/pgodw/omnitalk/go/appletalk" + "github.com/pgodw/omnitalk/pkg/encoding" ) func UCase(input []byte) []byte { - return appletalk.MacRomanToUpper(input) + return encoding.MacRomanToUpper(input) } type ZoneInformationTable struct { diff --git a/scripts/ci/build.ps1 b/scripts/ci/build.ps1 index 1dbb88b..fc58ce6 100644 --- a/scripts/ci/build.ps1 +++ b/scripts/ci/build.ps1 @@ -3,7 +3,21 @@ $ErrorActionPreference = 'Stop' $buildVersion = if ($env:BUILD_VERSION) { $env:BUILD_VERSION } else { '0.0.0-dev' } $buildCommit = if ($env:BUILD_COMMIT) { $env:BUILD_COMMIT } else { (git rev-parse --short=12 
HEAD).Trim() } $buildDate = if ($env:BUILD_DATE) { $env:BUILD_DATE } else { [DateTime]::UtcNow.ToString('yyyy-MM-ddTHH:mm:ssZ') } -$output = if ($env:OUTPUT) { $env:OUTPUT } else { 'out/omnitalk.exe' } +$buildVariant = if ($env:BUILD_VARIANT) { $env:BUILD_VARIANT } else { 'all' } + +switch ($buildVariant) { + 'all' { $tags = 'all' } + 'router' { $tags = '' } + default { throw "Unsupported BUILD_VARIANT: $buildVariant (expected: all|router)" } +} + +if ($env:OUTPUT) { + $output = $env:OUTPUT +} elseif ($buildVariant -eq 'all') { + $output = 'out/omnitalk.exe' +} else { + $output = "out/omnitalk-$buildVariant.exe" +} $versionForRc = '0.0.0.0' if ($buildVersion -match '^([0-9]+)\.([0-9]+)\.([0-9]+)(?:[-+].*)?$') { @@ -15,16 +29,19 @@ $minor = [int]$parts[1] $patch = [int]$parts[2] $build = [int]$parts[3] +$exeName = Split-Path -Leaf $output +$descriptionSuffix = if ($buildVariant -eq 'all') { '' } else { " ($buildVariant)" } + @" { "StringFileInfo": { "Comments": "OmniTalk", "CompanyName": "ObsoleteMadness", - "FileDescription": "OmniTalk AppleTalk Router", + "FileDescription": "OmniTalk AppleTalk Router$descriptionSuffix", "FileVersion": "$buildVersion", "InternalName": "omnitalk", "LegalCopyright": "GPL-3.0", - "OriginalFilename": "omnitalk.exe", + "OriginalFilename": "$exeName", "ProductName": "OmniTalk", "ProductVersion": "$buildVersion" }, @@ -63,6 +80,10 @@ if ($parent) { New-Item -Path $parent -ItemType Directory -Force | Out-Null } -go build -trimpath ` - -ldflags "-s -w -X main.BuildVersion=$buildVersion -X main.BuildCommit=$buildCommit -X main.BuildDate=$buildDate" ` - -o $output ./cmd/omnitalk +$ldflags = "-s -w -X main.BuildVersion=$buildVersion -X main.BuildCommit=$buildCommit -X main.BuildDate=$buildDate" + +if ($tags) { + go build -trimpath -tags $tags -ldflags $ldflags -o $output ./cmd/omnitalk +} else { + go build -trimpath -ldflags $ldflags -o $output ./cmd/omnitalk +} diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh index c71ebfc..5b60ad3 
100644 --- a/scripts/ci/build.sh +++ b/scripts/ci/build.sh @@ -4,11 +4,31 @@ set -euo pipefail build_version="${BUILD_VERSION:-0.0.0-dev}" build_commit="${BUILD_COMMIT:-$(git rev-parse --short=12 HEAD)}" build_date="${BUILD_DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)}" -output="${OUTPUT:-out/omnitalk}" +build_variant="${BUILD_VARIANT:-all}" + +case "$build_variant" in + all) tags="all" ;; + router) tags="" ;; + *) + echo "Unsupported BUILD_VARIANT: $build_variant (expected: all|router)" >&2 + exit 1 + ;; +esac + +if [[ -n "${OUTPUT:-}" ]]; then + output="$OUTPUT" +elif [[ "$build_variant" == "all" ]]; then + output="out/omnitalk" +else + output="out/omnitalk-${build_variant}" +fi mkdir -p "$(dirname "$output")" -# Keep version metadata consistent across all non-Windows builds. -go build -trimpath \ - -ldflags "-s -w -X main.BuildVersion=${build_version} -X main.BuildCommit=${build_commit} -X main.BuildDate=${build_date}" \ - -o "$output" ./cmd/omnitalk +ldflags="-s -w -X main.BuildVersion=${build_version} -X main.BuildCommit=${build_commit} -X main.BuildDate=${build_date}" + +if [[ -n "$tags" ]]; then + go build -trimpath -tags "$tags" -ldflags "$ldflags" -o "$output" ./cmd/omnitalk +else + go build -trimpath -ldflags "$ldflags" -o "$output" ./cmd/omnitalk +fi diff --git a/scripts/ci/package-release.ps1 b/scripts/ci/package-release.ps1 index dfde419..b15190b 100644 --- a/scripts/ci/package-release.ps1 +++ b/scripts/ci/package-release.ps1 @@ -1,12 +1,22 @@ $ErrorActionPreference = 'Stop' $releaseTag = if ($env:RELEASE_TAG) { $env:RELEASE_TAG } else { 'dev-local' } -$stage = "release/omnitalk-$releaseTag-windows-amd64" -$archiveName = "omnitalk-$releaseTag-windows-amd64.zip" +$buildVariant = if ($env:BUILD_VARIANT) { $env:BUILD_VARIANT } else { 'all' } + +if ($buildVariant -eq 'all') { + $variantSlug = '' + $exeName = 'omnitalk.exe' +} else { + $variantSlug = "-$buildVariant" + $exeName = "omnitalk-$buildVariant.exe" +} + +$stage = 
"release/omnitalk$variantSlug-$releaseTag-windows-amd64" +$archiveName = "omnitalk$variantSlug-$releaseTag-windows-amd64.zip" New-Item -ItemType Directory -Path $stage -Force | Out-Null -Copy-Item out/omnitalk.exe "$stage/omnitalk.exe" -Copy-Item README.md,server.ini.example,extmap.conf $stage +Copy-Item "out/$exeName" "$stage/$exeName" +Copy-Item README.md,server.toml.example,extmap.conf $stage Get-ChildItem -Path dist -Force | Copy-Item -Destination $stage -Recurse -Force Compress-Archive -Path $stage -DestinationPath $archiveName -Force diff --git a/scripts/ci/package-release.sh b/scripts/ci/package-release.sh index 7b3d6a5..b7be877 100644 --- a/scripts/ci/package-release.sh +++ b/scripts/ci/package-release.sh @@ -4,19 +4,28 @@ set -euo pipefail target_os="${TARGET_OS:-}" release_tag="${RELEASE_TAG:-dev-local}" build_version="${BUILD_VERSION:-0.0.0-dev}" +build_variant="${BUILD_VARIANT:-all}" if [[ -z "$target_os" ]]; then echo "TARGET_OS is required (linux|macos)" >&2 exit 1 fi +if [[ "$build_variant" == "all" ]]; then + variant_slug="" + exe_name="omnitalk" +else + variant_slug="-${build_variant}" + exe_name="omnitalk-${build_variant}" +fi + if [[ "$target_os" == "linux" ]]; then - stage="release/omnitalk-${release_tag}-linux-amd64" - archive_name="omnitalk-${release_tag}-linux-amd64.tar.gz" + stage="release/omnitalk${variant_slug}-${release_tag}-linux-amd64" + archive_name="omnitalk${variant_slug}-${release_tag}-linux-amd64.tar.gz" mkdir -p "$stage" - cp out/omnitalk "$stage/" - cp README.md server.ini.example extmap.conf "$stage/" + cp "out/${exe_name}" "$stage/${exe_name}" + cp README.md server.toml.example extmap.conf "$stage/" cp -a dist/. 
"$stage/" tar -C release -czf "$archive_name" "$(basename "$stage")" echo "$archive_name" @@ -24,25 +33,38 @@ if [[ "$target_os" == "linux" ]]; then fi if [[ "$target_os" == "macos" ]]; then - stage="release/omnitalk-${release_tag}-macos-amd64" - archive_name="omnitalk-${release_tag}-macos-amd64.zip" - app_root="$stage/OmniTalk.app/Contents" + stage="release/omnitalk${variant_slug}-${release_tag}-macos-amd64" + archive_name="omnitalk${variant_slug}-${release_tag}-macos-amd64.zip" + if [[ "$build_variant" == "all" ]]; then + bundle_name="OmniTalk.app" + else + bundle_name="OmniTalk-${build_variant}.app" + fi + app_root="$stage/${bundle_name}/Contents" mkdir -p "$app_root/MacOS" "$app_root/Resources" - cp out/omnitalk "$app_root/MacOS/omnitalk" + cp "out/${exe_name}" "$app_root/MacOS/omnitalk" chmod +x "$app_root/MacOS/omnitalk" cp icons/omnitalk.icns "$app_root/Resources/omnitalk.icns" + if [[ "$build_variant" == "all" ]]; then + display_name="OmniTalk" + bundle_id="com.obsoletemadness.omnitalk" + else + display_name="OmniTalk (${build_variant})" + bundle_id="com.obsoletemadness.omnitalk.${build_variant}" + fi + cat > "$app_root/Info.plist" < - CFBundleDisplayNameOmniTalk + CFBundleDisplayName${display_name} CFBundleExecutableomnitalk CFBundleIconFileomnitalk.icns - CFBundleIdentifiercom.obsoletemadness.omnitalk - CFBundleNameOmniTalk + CFBundleIdentifier${bundle_id} + CFBundleName${display_name} CFBundlePackageTypeAPPL CFBundleShortVersionString${build_version} CFBundleVersion${build_version} @@ -51,7 +73,7 @@ if [[ "$target_os" == "macos" ]]; then EOF - cp README.md server.ini.example extmap.conf "$stage/" + cp README.md server.toml.example extmap.conf "$stage/" cp -a dist/. 
"$stage/" (cd release && zip -r "../$archive_name" "$(basename "$stage")") echo "$archive_name" diff --git a/scripts/ci/test.sh b/scripts/ci/test.sh index c0db04f..b4c6da2 100644 --- a/scripts/ci/test.sh +++ b/scripts/ci/test.sh @@ -1,11 +1,25 @@ #!/usr/bin/env bash set -euo pipefail -mapfile -t packages < <(go list ./... | grep -Ev '(^|/)(dist|icon|icons)($|/)' || true) +# Run unit tests across the relevant build-tag combinations so optional +# subsystems (afp, macgarden, macip) actually get exercised — `go test +# ./...` without tags would skip the bulk of the codebase. +tag_sets=( + "" + "afp" + "afp macgarden" + "afp macip" + "afp macgarden macip" + "afp sqlite_cnid" + "all" +) -if [[ ${#packages[@]} -eq 0 ]]; then - echo "No packages found to test" >&2 - exit 1 -fi - -go test "${packages[@]}" +for tags in "${tag_sets[@]}"; do + echo "=== go test -tags \"${tags}\" ===" + mapfile -t packages < <(go list -tags "${tags}" ./... | grep -Ev '(^|/)(dist|icon|icons)($|/)' || true) + if [[ ${#packages[@]} -eq 0 ]]; then + echo "No packages found to test for tags=\"${tags}\"" >&2 + exit 1 + fi + go test -tags "${tags}" "${packages[@]}" +done diff --git a/server.ini b/server.ini deleted file mode 100644 index 35f338c..0000000 --- a/server.ini +++ /dev/null @@ -1,84 +0,0 @@ -[LToUdp] -; LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) -enabled = true ; Enable LToUDP - true for on, false for off -seed_network = 1 ; LToUDO seed network number -seed_zone = "LToUDP Network" ; LToUDP seed zone name. - -[TashTalk] -; TashTalk is a PIC-based RS482 localtalk to serial adaptor -port = COM6 ; blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) -seed_network = 2 ; TashTalk seed network number -seed_zone = "TashTalk Network" ; TashTalk seed zone name - -[EtherTalk] -; Ethertalk is a pcap based Network Bridge -backend = pcap ; supported: pcap, tap, tun. Leave blank to disable ethertalk. 
-device = "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}" ; PCap device name. Blank to disable ethertalk. Call with -list-pcap-devices to see what to use. Linux /dev/eth0. Windows: "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}". -;device = "\Device\NPF_{7A63BBB0-EBC1-4FA7-A397-8E7F42E39A73}" ; PCap device name. Blank to disable ethertalk. Call with -list-pcap-devices to see what to use. Linux /dev/eth0. Windows: "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}". -hw_address = "DE:AD:BE:EF:CA:FE" ; EtherTalk Hardware Address to use for router. -seed_network_min = 3 ; EtherTalk seed network number -seed_network_max = 5 ; EtherTalk seed network -seed_zone = "EtherTalk Network" ; EtherTalk seed zone name -bridge_mode = auto ; auto (default), ethernet, or wifi. Use wifi for bridge-shim rewriting on Wi-Fi adapters. -bridge_host_mac = ; optional host adapter MAC for Wi-Fi bridge shim. Defaults to hw_address when blank. - - -[MacIP] -; MacIP Gateway Settings. Allows TCP over DDP. -enabled = true ; true to enable MacIP Gateway, false to disable -mode = pcap ; modes are pcap or nat. -zone = ; MacIP Gateway Zone, defaults to EtherTalk zone, otherwise the first zone detected. -nat_subnet = ; in NAT mode, the subnet to use (eg 192.168.100.0/24) -nat_gw = ; in NAT mode, the IP Address to use for the gateway (eg 192.168.100.1) -lease_file = leases.txt ; in NAT mode, persist DHCP leases to the specified file -ip_gateway = ; Upstream/default gateway on the IP-side network -dhcp_relay = true ; DHCP Relay, converts MacTCP Auto Config to DHCP requests -nameserver = 1.1.1.1 ; Name server for DNS - - -[AFP] -; Apple Filing Protocol Server Settings -enabled = true ; true to enable AFP Server, false to disable -name = "OmniTalk" ; Name of the server to use. Max length of 31 characters. -zone = "EtherTalk Network" ; Name of the AppleTalk Zone to list the server in -protocols = ddp,tcp ; Protocols to use. Supports ddp (AppleTalk) and tcp (TCP/IP). 
They can be combined (eg ddp,tcp) -binding = ":548" ; When TCP is enabled, the IP+Port to bind the service to. -extension_map = "extmap.conf" ; Netatalk compatible extension mapping file - -[Volumes.Default] -name = "Welcome" -path = "./dist/Sample Volume" -read_only = true - -[Volumes.TestVolume] -; AFP Volume Configuration. Each volume must have a section for this. -name = "Test Volume" ; Volume Name. Max Length of 31 characters. -path = "C:\Mac\Test" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -cnid_backend = ; leave blank for default. Default is "memory" and is currently the only mode supported -use_decomposed_names = true ; Encode host-reserved filename characters using 0xNN tokens when mapping AFP paths. Default is true. -fork_backend = AppleDouble ; Fork backend to use. Currently only "AppleDouble" is implemented. -appledouble_mode = "modern" ; AppleDouble mode to use if using AppleDouble. Supported options are "legacy" and "modern". - ; Legacy is the NetaTalk 2.x ".appledouble" folder approach. - ; Modern is the NetaTalk 4.x method of "._" side cars. Default is "modern". -rebuild_desktop_db = false ; When true, rebuilds the desktop database from resource forks. Default is false. - -[Volumes.Volume68k] -; AFP Volume Configuration. Each volume must have a section for this. -name = "Volume 68K" ; Volume Name. Max Length of 31 characters. -path = "C:\Mac\Volume68K" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -cnid_backend = ; leave blank for default. Default is "memory" and is currently the only mode supported -use_decomposed_names = true ; Encode host-reserved filename characters using 0xNN tokens when mapping AFP paths. Default is true. -fork_backend = AppleDouble ; Fork backend to use. Currently only "AppleDouble" is implemented. -appledouble_mode = "legacy" ; AppleDouble mode to use if using AppleDouble. Supported options are "legacy" and "modern". - ; Legacy is the NetaTalk 2.x ".appledouble" folder approach. 
- ; Modern is the NetaTalk 4.x method of "._" side cars. Default is "modern". -rebuild_desktop_db = false ; When true, rebuilds the desktop database from resource forks. - - -[Logging] -level = debug -parse_packets = true -log_traffic = true - - - diff --git a/server.ini.example b/server.ini.example deleted file mode 100644 index 0829896..0000000 --- a/server.ini.example +++ /dev/null @@ -1,79 +0,0 @@ -[LToUdp] -; LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) -enabled = true ; Enable LToUDP - true for on, false for off -interface = 0.0.0.0 ; local IPv4 interface/address for multicast join+send (0.0.0.0 = auto) -seed_network = 1 ; LToUDO seed network number -seed_zone = "LToUDP Network" ; LToUDP seed zone name. - -[TashTalk] -; TashTalk is a PIC-based RS482 localtalk to serial adaptor -port = ; blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) -seed_network = 2 ; TashTalk seed network number -seed_zone = "TashTalk Network" ; TashTalk seed zone name - -[EtherTalk] -; Ethertalk is a pcap based Network Bridge -backend = pcap ; supported: pcap, tap, tun. Leave blank to disable ethertalk. -;device = "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}" ; PCap device name. Blank to disable ethertalk. Call with -list-pcap-devices to see what to use. Linux /dev/eth0. Windows: "\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}". -device = "\Device\NPF_{1DFDAA9C-7DD4-40F8-B6D4-9298C273D654}" -hw_address = "DE:AD:BE:EF:CA:FE" ; EtherTalk Hardware Address to use for router. -bridge_mode = auto ; auto (default), ethernet, or wifi. Use wifi for bridge-shim rewriting on Wi-Fi adapters. -bridge_host_mac = ; optional host adapter MAC for Wi-Fi bridge shim. Defaults to hw_address when blank. -seed_network_min = 3 ; EtherTalk seed network number -seed_network_max = 5 ; EtherTalk seed network -seed_zone = "EtherTalk Network" ; EtherTalk seed zone name - -[MacIP] -; MacIP Gateway Settings. Allows TCP over DDP. 
-enabled = false ; true to enable MacIP Gateway, false to disable -mode = pcap ; modes are pcap or nat. -zone = ; MacIP Gateway Zone, defaults to EtherTalk zone, otherwise the first zone detected. -nat_subnet = ; in NAT mode, the subnet to use (eg 192.168.100.0/24) -nat_gw = ; in NAT mode, the IP Address to use for the gateway (eg 192.168.100.1) -lease_file = leases.txt ; in NAT mode, persist DHCP leases to the specified file -ip_gateway = "192.168.0.1" ; Maps to -macip-ip-gw (upstream/default gateway on the IP-side network) -dhcp_relay = true ; DHCP Relay, converts MacTCP Auto Config to DHCP requests -nameserver = 1.1.1.1 ; Name server for DNS - - -[AFP] -; Apple Filing Protocol Server Settings -enabled = true ; true to enable AFP Server, false to disable -name = "OmniTalk" ; Name of the server to use. Max length of 31 characters. -zone = "EtherTalk Network" ; Name of the AppleTalk Zone to list the server in -protocols = ddp,tcp ; Protocols to use. Supports ddp (AppleTalk) and tcp (TCP/IP). They can be combined (eg ddp,tcp) -binding = ":548" ; When TCP is enabled, the IP+Port to bind the service to. -extension_map = "extmap.conf" ; Netatalk compatible extension mapping file - -[Volumes.TestVolume] -; AFP Volume Configuration. Each volume must have a section for this. -name = "Test Volume" ; Volume Name. Max Length of 31 characters. -path = "C:\Mac\Test" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -cnid_backend = ; leave blank for default. Default is "memory" and is currently the only mode supported -use_decomposed_names = true ; Encode host-reserved filename characters using 0xNN tokens when mapping AFP paths. Default is true. -fork_backend = AppleDouble ; Fork backend to use. Currently only "AppleDouble" is implemented. -appledouble_mode = "modern" ; AppleDouble mode to use if using AppleDouble. Supported options are "legacy" and "modern". - ; Legacy is the NetaTalk 2.x ".appledouble" folder approach. 
- ; Modern is the NetaTalk 4.x method of "._" side cars. Default is "modern". -rebuild_desktop_db = false ; When true, rebuilds the desktop database from resource forks. Default is false. - -[Volumes.Volume68k] -; AFP Volume Configuration. Each volume must have a section for this. -name = "Volume 68K" ; Volume Name. Max Length of 31 characters. -path = "C:\Mac\Volume68K" ; Host path for the volume. Eg "/media/Mac", "C:\Foo" -cnid_backend = ; leave blank for default. Default is "memory" and is currently the only mode supported -use_decomposed_names = true ; Encode host-reserved filename characters using 0xNN tokens when mapping AFP paths. Default is true. -fork_backend = AppleDouble ; Fork backend to use. Currently only "AppleDouble" is implemented. -appledouble_mode = "legacy" ; AppleDouble mode to use if using AppleDouble. Supported options are "legacy" and "modern". - ; Legacy is the NetaTalk 2.x ".appledouble" folder approach. - ; Modern is the NetaTalk 4.x method of "._" side cars. Default is "modern". -rebuild_desktop_db = false ; When true, rebuilds the desktop database from resource forks. - - -[Logging] -level = debug -parse_packets = true -log_traffic = false - - - diff --git a/server.toml b/server.toml new file mode 100644 index 0000000..6fdc912 --- /dev/null +++ b/server.toml @@ -0,0 +1,70 @@ +[LToUdp] +# LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) +enabled = true # Enable LToUDP - true for on, false for off +seed_network = 1 # LToUDP seed network number +seed_zone = "LToUDP Network" # LToUDP seed zone name + +[TashTalk] +# TashTalk is a PIC-based RS422 LocalTalk to serial adaptor +# port = "COM6" # blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) +seed_network = 2 # TashTalk seed network number +seed_zone = "TashTalk Network" + +[EtherTalk] +# EtherTalk is a pcap-based network bridge +backend = "pcap" # supported: pcap, tap, tun. Leave blank to disable EtherTalk. 
+device = '\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}' +# device = '\Device\NPF_{7A63BBB0-EBC1-4FA7-A397-8E7F42E39A73}' +hw_address = "DE:AD:BE:EF:CA:FE" # EtherTalk hardware address for the router +seed_network_min = 3 +seed_network_max = 5 +seed_zone = "EtherTalk Network" +bridge_mode = "auto" # auto (default), ethernet, or wifi +bridge_host_mac = "" # optional host adapter MAC for Wi-Fi bridge shim + +[MacIP] +# MacIP Gateway Settings. Allows TCP over DDP. +enabled = true # true to enable MacIP gateway +mode = "pcap" # pcap or nat +zone = "" # MacIP gateway zone, defaults to EtherTalk zone +nat_subnet = "" # in NAT mode, the subnet to use +nat_gw = "" # in NAT mode, the gateway IP +lease_file = "leases.txt" # in NAT mode, persist DHCP leases here +ip_gateway = "" # upstream/default gateway on the IP-side network +dhcp_relay = true # convert MacTCP auto-config to DHCP requests +nameserver = "1.1.1.1" # DNS nameserver + +[AFP] +# Apple Filing Protocol server settings +enabled = true +name = "OmniTalk" # Server name. Max 31 characters. +zone = "EtherTalk Network" +protocols = "ddp,tcp" # Comma-separated: ddp, tcp, or both +binding = ":548" +extension_map = "extmap.conf" # Netatalk-compatible extension mapping file + +[AFP.Volumes.Default] +name = "Welcome" +path = "./dist/Sample Volume" +read_only = true + +[AFP.Volumes.TestVolume] +name = "Test Volume" # Volume name. Max 31 characters. 
+path = 'C:\Mac\Test' +appledouble_mode = "modern" # per-volume override; "modern" (._ sidecars) or "legacy" (.appledouble folder) +rebuild_desktop_db = false + +[AFP.Volumes.Volume68k] +name = "Volume 68K" +path = 'C:\Mac\Volume68K' +appledouble_mode = "legacy" +rebuild_desktop_db = false + +[AFP.Volumes.MacGarden] +name = "Mac Garden" +fs_type = "macgarden" + +[Logging] +level = "debug" +parse_packets = true +log_traffic = false diff --git a/server.toml.example b/server.toml.example new file mode 100644 index 0000000..8be1d6b --- /dev/null +++ b/server.toml.example @@ -0,0 +1,70 @@ +[LToUdp] +# LocalTalk over UDP Settings (used by Mini vMac UDP builds and SNOW emu) +enabled = true # Enable LToUDP - true for on, false for off +interface = "0.0.0.0" # local IPv4 interface/address for multicast join+send (0.0.0.0 = auto) +seed_network = 1 # LToUDP seed network number +seed_zone = "LToUDP Network" # LToUDP seed zone name + +[TashTalk] +# TashTalk is a PIC-based RS422 LocalTalk to serial adaptor +port = "" # blank to disable, otherwise the serial port to use (eg COM1, /dev/ttyAMA0) +seed_network = 2 # TashTalk seed network number +seed_zone = "TashTalk Network" # TashTalk seed zone name + +[EtherTalk] +# EtherTalk is a pcap-based network bridge +backend = "pcap" # supported: pcap, tap, tun. Leave blank to disable EtherTalk. +# device = '\Device\NPF_{B7D4E073-2185-4912-BBE8-3948C6636D02}' +# PCap device name. Blank to disable EtherTalk. Use literal strings (single quotes) on Windows +# so the backslashes are not interpreted as TOML escapes. Linux: "/dev/eth0". +device = '\Device\NPF_{1DFDAA9C-7DD4-40F8-B6D4-9298C273D654}' +hw_address = "DE:AD:BE:EF:CA:FE" # EtherTalk hardware address for the router +bridge_mode = "auto" # auto (default), ethernet, or wifi. Use wifi for bridge-shim rewriting on Wi-Fi adapters. +bridge_host_mac = "" # optional host adapter MAC for Wi-Fi bridge shim. Defaults to hw_address when blank. 
+seed_network_min = 3 # EtherTalk seed network minimum +seed_network_max = 5 # EtherTalk seed network maximum +seed_zone = "EtherTalk Network" + +[MacIP] +# MacIP Gateway Settings. Allows TCP over DDP. +enabled = false # true to enable MacIP gateway +mode = "pcap" # pcap or nat +zone = "" # MacIP gateway zone, defaults to EtherTalk zone +nat_subnet = "" # in NAT mode, the subnet to use (eg 192.168.100.0/24) +nat_gw = "" # in NAT mode, the IP address to use for the gateway +lease_file = "leases.txt" # in NAT mode, persist DHCP leases to this file +ip_gateway = "192.168.0.1" # upstream/default gateway on the IP-side network +dhcp_relay = true # convert MacTCP auto-config to DHCP requests +nameserver = "1.1.1.1" # DNS nameserver + +[AFP] +# Apple Filing Protocol server settings +enabled = true # true to enable AFP server +name = "OmniTalk" # Server name. Max 31 characters. +zone = "EtherTalk Network" # AppleTalk zone to advertise the server in +protocols = "ddp,tcp" # Comma-separated: ddp, tcp, or both +binding = ":548" # When TCP is enabled, the bind address +extension_map = "extmap.conf" # Netatalk-compatible extension mapping file +cnid_backend = "sqlite" # CNID backend: sqlite or memory +use_decomposed_names = true # Encode host-reserved filename characters using 0xNN tokens +appledouble_mode = "modern" # "modern" (._ sidecars, Netatalk 4.x) or "legacy" (.appledouble folder) + +[AFP.Volumes.TestVolume] +# Each AFP volume gets an [AFP.Volumes.] section. +name = "Test Volume" # Volume name. Max 31 characters. +path = 'C:\Mac\Test' # Host path. Use literal strings on Windows to skip TOML escapes. 
+fs_type = "local_fs" # Filesystem backend: local_fs (default) or macgarden +appledouble_mode = "modern" # Per-volume override; falls back to AFP.appledouble_mode +rebuild_desktop_db = false # Rebuild the desktop DB from resource forks at startup + +[AFP.Volumes.Volume68k] +name = "Volume 68K" +path = 'C:\Mac\Volume68K' +fs_type = "local_fs" +appledouble_mode = "legacy" +rebuild_desktop_db = false + +[Logging] +level = "debug" +parse_packets = true +log_traffic = false diff --git a/service/aep/aep.go b/service/aep/aep.go index b6c9145..4196772 100644 --- a/service/aep/aep.go +++ b/service/aep/aep.go @@ -9,27 +9,35 @@ Inside Macintosh: Networking, Chapter 3. package aep import ( - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "context" + "sync" + + "github.com/pgodw/omnitalk/protocol/aep" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) +// Socket is the well-known AEP socket number, re-exported from protocol/aep +// for callers wiring a router. +const Socket = aep.Socket + const ( - // Socket is the well-known AEP socket number. - Socket = 4 - ddpTypeAEP = 4 - cmdRequest = 1 - cmdReply = 2 + ddpTypeAEP = aep.DDPType + cmdRequest = aep.CmdRequest + cmdReply = aep.CmdReply ) // Service implements the AppleTalk Echo Protocol. type Service struct { ch chan item stop chan struct{} + wg sync.WaitGroup } type item struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } @@ -45,10 +53,14 @@ func New() *Service { func (s *Service) Socket() uint8 { return Socket } // Start launches the AEP processing goroutine. 
-func (s *Service) Start(router service.Router) error { +func (s *Service) Start(ctx context.Context, router service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() for { select { + case <-ctx.Done(): + return case <-s.stop: return case it := <-s.ch: @@ -67,11 +79,12 @@ func (s *Service) Start(router service.Router) error { // Stop shuts down the AEP service. func (s *Service) Stop() error { close(s.stop) + s.wg.Wait() return nil } // Inbound queues an incoming datagram for processing. -func (s *Service) Inbound(d appletalk.Datagram, p port.Port) { +func (s *Service) Inbound(d ddp.Datagram, p port.Port) { select { case s.ch <- item{d, p}: default: diff --git a/service/aep/doc.go b/service/aep/doc.go new file mode 100644 index 0000000..dfb1628 --- /dev/null +++ b/service/aep/doc.go @@ -0,0 +1,5 @@ +// Package aep implements the AppleTalk Echo Protocol — the simple +// ping responder on DDP socket 4. +// +// See spec/04-aep.md and Inside AppleTalk 2/e §6. +package aep diff --git a/service/afp/appledouble.go b/service/afp/appledouble.go deleted file mode 100644 index 21c2796..0000000 --- a/service/afp/appledouble.go +++ /dev/null @@ -1,194 +0,0 @@ -package afp - -// Package-level support for AppleDouble (._filename) files. -// -// AppleDouble is the format used by macOS, netatalk 4.x, and Samba/CIFS to store -// resource forks and Finder metadata alongside regular files on non-HFS filesystems. -// The sidecar file is named "._" + original filename and lives in the same directory. -// -// References: -// - AppleDouble / AppleSingle Formats, Apple II File Type Note $E0/0000 -// - netatalk 4.x source (afpd/unix.c, libatalk/adouble/) -// - macOS copyfile(3) / xattr behavior on SMB/CIFS mounts - -import ( - "encoding/binary" - "io" - "path/filepath" -) - -const ( - adMagic uint32 = 0x00051607 - adVersion uint32 = 0x00020000 - - // AppleDouble entry IDs (AppleSingle/AppleDouble spec). 
- adEntryIDDataFork = uint32(1) - adEntryIDFinderInfo = uint32(9) - adEntryIDResourceFork = uint32(2) - adEntryIDComment = uint32(4) - // adEntryIDIconBW is the AppleSingle/AppleDouble entry ID for a classic - // 32x32 1-bit Macintosh icon (see netatalk adouble.h AD_ICON). The payload - // is 128 bytes of bitmap with no mask. - adEntryIDIconBW = uint32(5) - - adHeaderSize = 26 // magic(4)+version(4)+filler(16)+numEntries(2) - adEntrySize = 12 // id(4)+offset(4)+length(4) - - // Offsets for a standard 2-entry AppleDouble (FinderInfo + ResourceFork). - adFinderInfoOffset = uint32(adHeaderSize + 2*adEntrySize) // 50 - adResourceForkStart = adFinderInfoOffset + 32 // 82 - - // Byte offset of the resource-fork entry's "length" field within the file - // for a canonical two-entry file (FinderInfo + ResourceFork). - adRsrcLenFileOffset = int64(adHeaderSize + adEntrySize + 8) // 46 -) - -// appleDoublePath returns the modern (._name) sidecar path for filePath. -// Backend code may choose a different layout (for example legacy .AppleDouble). -func appleDoublePath(filePath string) string { - return filepath.Join(filepath.Dir(filePath), "._"+filepath.Base(filePath)) -} - -// appleDoubleData holds the parsed contents of an AppleDouble sidecar file. 
-type appleDoubleData struct { - finderInfo [32]byte - rsrcOffset int64 - rsrcLength int64 - rsrcLenFieldAt int64 // file offset of the ResourceFork entry's length field - hasRsrc bool -} - -type parsedAppleDouble struct { - finderInfo [32]byte - comment []byte - rsrc []byte - iconBW []byte - rsrcOffset int64 - rsrcLenAt int64 - hasFinder bool - hasComment bool - hasRsrc bool - hasIconBW bool -} - -func parseAppleDoubleBytes(b []byte) (parsedAppleDouble, error) { - var out parsedAppleDouble - if len(b) < adHeaderSize { - return out, io.ErrUnexpectedEOF - } - if binary.BigEndian.Uint32(b[0:4]) != adMagic { - return out, io.ErrUnexpectedEOF - } - numEntries := int(binary.BigEndian.Uint16(b[24:26])) - entriesStart := adHeaderSize - entriesLen := numEntries * adEntrySize - if len(b) < entriesStart+entriesLen { - return out, io.ErrUnexpectedEOF - } - - for i := 0; i < numEntries; i++ { - off := entriesStart + i*adEntrySize - id := binary.BigEndian.Uint32(b[off : off+4]) - eOff := int(binary.BigEndian.Uint32(b[off+4 : off+8])) - eLen := int(binary.BigEndian.Uint32(b[off+8 : off+12])) - if eOff < 0 || eLen < 0 || eOff+eLen > len(b) { - continue - } - switch id { - case adEntryIDFinderInfo: - if eLen >= 32 { - copy(out.finderInfo[:], b[eOff:eOff+32]) - out.hasFinder = true - } - case adEntryIDComment: - if eLen > 0 { - out.comment = append([]byte(nil), b[eOff:eOff+eLen]...) - out.hasComment = true - } - case adEntryIDResourceFork: - out.rsrcOffset = int64(eOff) - out.rsrcLenAt = int64(off + 8) - if eLen > 0 { - out.rsrc = append([]byte(nil), b[eOff:eOff+eLen]...) - } else { - out.rsrc = nil - } - out.hasRsrc = true - case adEntryIDIconBW: - if eLen > 0 { - out.iconBW = append([]byte(nil), b[eOff:eOff+eLen]...) - out.hasIconBW = true - } - case adEntryIDDataFork: - // Not used by our server; ignore. 
- } - } - return out, nil -} - -func buildAppleDoubleBytes(p parsedAppleDouble, includeCommentEntry bool, commentLen uint32) []byte { - // We always write FinderInfo and ResourceFork entries. - numEntries := 2 - if includeCommentEntry { - numEntries = 3 - } - headerLen := adHeaderSize + numEntries*adEntrySize - - finderOff := uint32(headerLen) - finderLen := uint32(32) - cur := finderOff + finderLen - - var commentOff uint32 - if includeCommentEntry { - commentOff = cur - cur += commentLen - } - - rsrcOff := cur - rsrcLen := uint32(len(p.rsrc)) - total := int(rsrcOff + rsrcLen) - if total < int(rsrcOff) { - total = int(rsrcOff) - } - out := make([]byte, total) - - // Header - binary.BigEndian.PutUint32(out[0:4], adMagic) - binary.BigEndian.PutUint32(out[4:8], adVersion) - // filler [8:24] stays zero - binary.BigEndian.PutUint16(out[24:26], uint16(numEntries)) - - // Entries - entriesStart := adHeaderSize - putEntry := func(i int, id, off, ln uint32) { - base := entriesStart + i*adEntrySize - binary.BigEndian.PutUint32(out[base:base+4], id) - binary.BigEndian.PutUint32(out[base+4:base+8], off) - binary.BigEndian.PutUint32(out[base+8:base+12], ln) - } - - putEntry(0, adEntryIDFinderInfo, finderOff, finderLen) - if includeCommentEntry { - putEntry(1, adEntryIDComment, commentOff, commentLen) - putEntry(2, adEntryIDResourceFork, rsrcOff, rsrcLen) - } else { - putEntry(1, adEntryIDResourceFork, rsrcOff, rsrcLen) - } - - // FinderInfo payload - if p.hasFinder { - copy(out[finderOff:finderOff+finderLen], p.finderInfo[:]) - } - - // Comment payload (if present) - if includeCommentEntry && commentLen > 0 && len(p.comment) > 0 { - copy(out[commentOff:commentOff+commentLen], p.comment[:commentLen]) - } - - // Resource fork payload (if present) - if rsrcLen > 0 { - copy(out[rsrcOff:rsrcOff+rsrcLen], p.rsrc) - } - - return out -} diff --git a/service/afp/appledouble_backend.go b/service/afp/appledouble_backend.go index 42a7b9b..08a477e 100644 --- 
a/service/afp/appledouble_backend.go +++ b/service/afp/appledouble_backend.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -8,6 +10,8 @@ import ( "os" "path/filepath" "strings" + + "github.com/pgodw/omnitalk/pkg/appledouble" ) const defaultAppleDoubleMode = AppleDoubleModeModern @@ -32,7 +36,7 @@ func NewAppleDoubleBackend(fs FileSystem, mode AppleDoubleMode, decomposedNames return &AppleDoubleBackend{fs: fs, mode: mode, decomposedNames: decomposedNames} } -func resolveForkMetadataBackend(options AFPOptions, fs FileSystem) ForkMetadataBackend { +func resolveForkMetadataBackend(options Options, fs FileSystem) ForkMetadataBackend { if options.ForkMetadataBackend != nil { return options.ForkMetadataBackend } @@ -220,9 +224,9 @@ func (b *AppleDoubleBackend) OpenResourceFork(path string, writable bool) (File, return nil, ResourceForkInfo{}, err } return f, ResourceForkInfo{ - Offset: int64(adResourceForkStart), + Offset: int64(appledouble.ResourceForkStart), Length: 0, - LengthFieldOffset: adRsrcLenFileOffset, + LengthFieldOffset: appledouble.ResourceLenFileOffset, }, nil } @@ -233,7 +237,7 @@ func (b *AppleDoubleBackend) TruncateResourceFork(file File, info ResourceForkIn lenFieldAt := info.LengthFieldOffset if lenFieldAt == 0 { - lenFieldAt = adRsrcLenFileOffset + lenFieldAt = appledouble.ResourceLenFileOffset } lenBuf := make([]byte, 4) @@ -535,7 +539,7 @@ func (b *AppleDoubleBackend) readFile(path string) ([]byte, error) { return nil, readErr } } - if len(buf) < adHeaderSize { + if len(buf) < appledouble.HeaderSize { return nil, io.ErrUnexpectedEOF } return buf, nil @@ -560,7 +564,18 @@ func (b *AppleDoubleBackend) createAppleDoublePath(adPath string) error { if err := b.ensureAppleDoubleDir(adPath); err != nil { return err } - return b.writeFile(adPath, buildAppleDoubleBytes(parsedAppleDouble{}, false, 0)) + return b.writeFile(adPath, appledouble.Build(appledouble.Parsed{}, false, 0)) +} + +// appleDoubleData is the slim summary the fork I/O 
paths consume from a +// parsed sidecar — just enough to graft Finder info and resource-fork +// length onto an open file. +type appleDoubleData struct { + finderInfo [32]byte + rsrcOffset int64 + rsrcLength int64 + rsrcLenFieldAt int64 + hasRsrc bool } func (b *AppleDoubleBackend) readAppleDoubleDataPath(adPath string) appleDoubleData { @@ -570,18 +585,18 @@ func (b *AppleDoubleBackend) readAppleDoubleDataPath(adPath string) appleDoubleD return result } - parsed, err := parseAppleDoubleBytes(bts) + parsed, err := appledouble.Parse(bts) if err != nil { return result } - if parsed.hasFinder { - result.finderInfo = parsed.finderInfo + if parsed.HasFinder { + result.finderInfo = parsed.FinderInfo } - if parsed.hasRsrc { - result.rsrcOffset = parsed.rsrcOffset - result.rsrcLength = int64(len(parsed.rsrc)) - result.rsrcLenFieldAt = parsed.rsrcLenAt + if parsed.HasResource { + result.rsrcOffset = parsed.ResourceOffset + result.rsrcLength = int64(len(parsed.Resource)) + result.rsrcLenFieldAt = parsed.ResourceLenAt result.hasRsrc = true } return result @@ -602,11 +617,11 @@ func (b *AppleDoubleBackend) writeFinderInfoPath(adPath string, fi [32]byte) err } } - parsed, _ := parseAppleDoubleBytes(bts) - parsed.finderInfo = fi - parsed.hasFinder = true + parsed, _ := appledouble.Parse(bts) + parsed.FinderInfo = fi + parsed.HasFinder = true - out := buildAppleDoubleBytes(parsed, parsed.hasComment, uint32(len(parsed.comment))) + out := appledouble.Build(parsed, parsed.HasComment, uint32(len(parsed.Comment))) return b.writeFile(adPath, out) } @@ -622,14 +637,14 @@ func (b *AppleDoubleBackend) writeAppleDoubleCommentPath(adPath string, comment } } - parsed, _ := parseAppleDoubleBytes(bts) + parsed, _ := appledouble.Parse(bts) if len(comment) > 199 { comment = comment[:199] } - parsed.comment = append([]byte(nil), comment...) - parsed.hasComment = len(comment) > 0 + parsed.Comment = append([]byte(nil), comment...) 
+ parsed.HasComment = len(comment) > 0 - out := buildAppleDoubleBytes(parsed, true, uint32(len(comment))) + out := appledouble.Build(parsed, true, uint32(len(comment))) return b.writeFile(adPath, out) } @@ -642,11 +657,11 @@ func (b *AppleDoubleBackend) removeAppleDoubleCommentPath(adPath string) error { return err } - parsed, _ := parseAppleDoubleBytes(bts) - parsed.comment = nil - parsed.hasComment = false + parsed, _ := appledouble.Parse(bts) + parsed.Comment = nil + parsed.HasComment = false - out := buildAppleDoubleBytes(parsed, true, 0) + out := appledouble.Build(parsed, true, 0) return b.writeFile(adPath, out) } @@ -655,15 +670,15 @@ func (b *AppleDoubleBackend) readAppleDoubleCommentPath(adPath string) ([]byte, if err != nil { return nil, false } - parsed, err := parseAppleDoubleBytes(bts) + parsed, err := appledouble.Parse(bts) if err != nil { return nil, false } - if !parsed.hasComment || len(parsed.comment) == 0 { + if !parsed.HasComment || len(parsed.Comment) == 0 { return nil, false } - if len(parsed.comment) > 128 { - return parsed.comment[:128], true + if len(parsed.Comment) > 128 { + return parsed.Comment[:128], true } - return parsed.comment, true + return parsed.Comment, true } diff --git a/service/afp/appledouble_backend_test.go b/service/afp/appledouble_backend_test.go index f389ecb..aa98dda 100644 --- a/service/afp/appledouble_backend_test.go +++ b/service/afp/appledouble_backend_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -118,13 +120,13 @@ func TestPerVolumeAppleDoubleMode(t *testing.T) { modernRoot := t.TempDir() legacyRoot := t.TempDir() - s := NewAFPService("TestServer", + s := NewService("TestServer", []VolumeConfig{ {Name: "Modern", Path: modernRoot, AppleDoubleMode: AppleDoubleModeModern}, {Name: "Legacy", Path: legacyRoot, AppleDoubleMode: AppleDoubleModeLegacy}, }, &LocalFileSystem{}, nil, - AFPOptions{DecomposedFilenames: true}, + Options{DecomposedFilenames: true}, ) // Volume 1 == Modern, Volume 2 == 
Legacy (IDs assigned by NewAFPService). @@ -224,13 +226,13 @@ func TestHandleCopyFile_ConvertsAppleDoubleModeBetweenVolumes(t *testing.T) { srcRoot := t.TempDir() dstRoot := t.TempDir() - s := NewAFPService("TestServer", + s := NewService("TestServer", []VolumeConfig{ {Name: "Source", Path: srcRoot, AppleDoubleMode: tc.srcMode}, {Name: "Target", Path: dstRoot, AppleDoubleMode: tc.dstMode}, }, &LocalFileSystem{}, nil, - AFPOptions{DecomposedFilenames: true}, + Options{DecomposedFilenames: true}, ) const srcVolID = uint16(1) @@ -354,13 +356,13 @@ func TestHandleCopyFile_DstPathTypeZeroIgnoresDstDirMarkerPayload(t *testing.T) srcRoot := t.TempDir() dstRoot := t.TempDir() - s := NewAFPService("TestServer", + s := NewService("TestServer", []VolumeConfig{ {Name: "Source", Path: srcRoot, AppleDoubleMode: AppleDoubleModeModern}, {Name: "Target", Path: dstRoot, AppleDoubleMode: AppleDoubleModeLegacy}, }, &LocalFileSystem{}, nil, - AFPOptions{DecomposedFilenames: true}, + Options{DecomposedFilenames: true}, ) const srcVolID = uint16(1) @@ -408,7 +410,7 @@ func TestHandleCopyFile_PreservesInfinityWhenNewNameEmpty(t *testing.T) { srcRoot := t.TempDir() dstRoot := t.TempDir() - s := NewAFPService("TestServer", + s := NewService("TestServer", []VolumeConfig{{Name: "Source", Path: srcRoot}, {Name: "Target", Path: dstRoot}}, &LocalFileSystem{}, nil, ) @@ -447,7 +449,7 @@ func TestHandleCopyFile_DecodesMacRomanNewName(t *testing.T) { srcRoot := t.TempDir() dstRoot := t.TempDir() - s := NewAFPService("TestServer", + s := NewService("TestServer", []VolumeConfig{{Name: "Source", Path: srcRoot}, {Name: "Target", Path: dstRoot}}, &LocalFileSystem{}, nil, ) diff --git a/service/afp/appledouble_fallback_test.go b/service/afp/appledouble_fallback_test.go index fa99b50..701dbae 100644 --- a/service/afp/appledouble_fallback_test.go +++ b/service/afp/appledouble_fallback_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -8,7 +10,7 @@ import ( func 
TestStatPathWithAppleDoubleFallback_FindsSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) baseName := "Netscape Navigator\u2122 2.02" sidecar := filepath.Join(root, "._"+baseName) @@ -31,7 +33,7 @@ func TestStatPathWithAppleDoubleFallback_FindsSidecar(t *testing.T) { func TestHandleGetFileDirParms_FallsBackToAppleDoubleName(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) baseName := "Netscape Navigator\u2122 2.02" sidecar := filepath.Join(root, "._"+baseName) @@ -62,11 +64,11 @@ func TestHandleGetFileDirParms_FallsBackToAppleDoubleName(t *testing.T) { func TestHandleRemoveComment_FallsBackToAppleDoubleName(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) db := NewDesktopDB(root) - s.desktopDBs[1] = db - s.dtRefs[1] = 1 + s.desktop.putDBForTest(1, db) + s.desktop.putRefForTest(1, 1) baseName := "Netscape Navigator\u2122 2.02" targetPath := filepath.Join(root, baseName) @@ -96,8 +98,8 @@ func TestHandleRemoveComment_FallsBackToAppleDoubleName(t *testing.T) { func TestHandleGetComment_FallsBackToUnicodeAppleDoubleName(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) - s.dtRefs[1] = 1 + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s.desktop.putRefForTest(1, 1) targetPath := filepath.Join(root, "CD-ROM Toolkit™ Installer") commentBackend, ok := 
s.metaFor(1).(CommentBackend) @@ -126,8 +128,8 @@ func TestHandleGetComment_FallsBackToUnicodeAppleDoubleName(t *testing.T) { func TestHandleRemoveComment_FallsBackToUnicodeAppleDoubleName(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) - s.dtRefs[1] = 1 + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s.desktop.putRefForTest(1, 1) targetPath := filepath.Join(root, "CD-ROM Toolkit™ Installer") commentBackend, ok := s.metaFor(1).(CommentBackend) @@ -156,9 +158,9 @@ func TestHandleRemoveComment_FallsBackToUnicodeAppleDoubleName(t *testing.T) { func TestStatPathWithAppleDoubleFallback_LegacyIconCarriageReturnAlias(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService( + s := NewService( "TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, @@ -186,16 +188,16 @@ func TestStatPathWithAppleDoubleFallback_LegacyIconCarriageReturnAlias(t *testin func TestHandleGetComment_LegacyIconCarriageReturnAlias(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService( + s := NewService( "TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options, ) - s.dtRefs[1] = 1 + s.desktop.putRefForTest(1, 1) actual := filepath.Join(root, "Icon_") if err := os.WriteFile(actual, []byte("icon"), 0644); err != nil { @@ -228,17 +230,17 @@ func TestHandleGetComment_LegacyIconCarriageReturnAlias(t *testing.T) { func TestHandleAddAPPL_LegacyIconCarriageReturnAlias(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService( + s := NewService( "TestServer", []VolumeConfig{{Name: 
"Vol", Path: root}}, &LocalFileSystem{}, nil, options, ) - s.desktopDBs[1] = NewDesktopDB(root) - s.dtRefs[1] = 1 + s.desktop.putDBForTest(1, NewDesktopDB(root)) + s.desktop.putRefForTest(1, 1) actual := filepath.Join(root, "Icon_") if err := os.WriteFile(actual, []byte("icon"), 0644); err != nil { @@ -264,9 +266,9 @@ func TestHandleAddAPPL_LegacyIconCarriageReturnAlias(t *testing.T) { func TestHandleGetAPPL_LegacyIconCarriageReturnAlias(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService( + s := NewService( "TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, @@ -274,8 +276,8 @@ func TestHandleGetAPPL_LegacyIconCarriageReturnAlias(t *testing.T) { options, ) db := NewDesktopDB(root) - s.desktopDBs[1] = db - s.dtRefs[1] = 1 + s.desktop.putDBForTest(1, db) + s.desktop.putRefForTest(1, 1) actual := filepath.Join(root, "Icon_") if err := os.WriteFile(actual, []byte("icon"), 0644); err != nil { diff --git a/service/afp/appledouble_lifecycle_test.go b/service/afp/appledouble_lifecycle_test.go index 9408ac3..05d1fc0 100644 --- a/service/afp/appledouble_lifecycle_test.go +++ b/service/afp/appledouble_lifecycle_test.go @@ -1,21 +1,25 @@ +//go:build afp || all + package afp import ( "os" "path/filepath" "testing" + + "github.com/pgodw/omnitalk/pkg/appledouble" ) func TestHandleRename_MovesAppleDoubleSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) oldName := "Configuration" newName := "Configuration Renamed" oldPath := filepath.Join(root, oldName) newPath := filepath.Join(root, newName) - oldAD := appleDoublePath(oldPath) - newAD := appleDoublePath(newPath) + oldAD := appledouble.SidecarPath(oldPath) + newAD := appledouble.SidecarPath(newPath) 
if err := os.WriteFile(oldPath, []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) @@ -45,7 +49,7 @@ func TestHandleRename_MovesAppleDoubleSidecar(t *testing.T) { func TestHandleRename_DecodesMacRomanNewNameAndMovesSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) oldName := "Seed" newAFPName := string([]byte{'M', 'a', 'r', 'a', 't', 'h', 'o', 'n', ' ', 0xB0, ' ', '1', '.', '5'}) @@ -53,8 +57,8 @@ func TestHandleRename_DecodesMacRomanNewNameAndMovesSidecar(t *testing.T) { oldPath := filepath.Join(root, oldName) newPath := filepath.Join(root, newHostName) - oldAD := appleDoublePath(oldPath) - newAD := appleDoublePath(newPath) + oldAD := appledouble.SidecarPath(oldPath) + newAD := appledouble.SidecarPath(newPath) if err := os.WriteFile(oldPath, []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) @@ -91,7 +95,7 @@ func TestHandleRename_DecodesMacRomanNewNameAndMovesSidecar(t *testing.T) { func TestHandleMoveAndRename_MovesAppleDoubleSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) srcDir := filepath.Join(root, "src") dstDir := filepath.Join(root, "dst") @@ -109,8 +113,8 @@ func TestHandleMoveAndRename_MovesAppleDoubleSidecar(t *testing.T) { newName := "Configuration Moved" srcPath := filepath.Join(srcDir, srcName) dstPath := filepath.Join(dstDir, newName) - srcAD := appleDoublePath(srcPath) - dstAD := appleDoublePath(dstPath) + srcAD := appledouble.SidecarPath(srcPath) + dstAD := appledouble.SidecarPath(dstPath) if err := os.WriteFile(srcPath, []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) @@ -143,7 +147,7 @@ func 
TestHandleMoveAndRename_MovesAppleDoubleSidecar(t *testing.T) { func TestHandleMoveAndRename_LegacyMovesAppleDoubleSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root, AppleDoubleMode: AppleDoubleModeLegacy}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root, AppleDoubleMode: AppleDoubleModeLegacy}}, &LocalFileSystem{}, nil) srcDir := filepath.Join(root, "src") dstDir := filepath.Join(root, "dst") @@ -198,7 +202,7 @@ func TestHandleMoveAndRename_LegacyMovesAppleDoubleSidecar(t *testing.T) { func TestHandleMoveAndRename_DstPathTypeZeroIgnoresDstDirMarkerPayload(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) srcDir := filepath.Join(root, "src") dstDir := filepath.Join(root, "dst") @@ -240,7 +244,7 @@ func TestHandleMoveAndRename_DstPathTypeZeroIgnoresDstDirMarkerPayload(t *testin func TestHandleMoveAndRename_DecodesMacRomanNewName(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) srcDir := filepath.Join(root, "src") dstDir := filepath.Join(root, "dst") @@ -282,11 +286,11 @@ func TestHandleMoveAndRename_DecodesMacRomanNewName(t *testing.T) { func TestHandleDelete_DeletesAppleDoubleSidecar(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) name := "Configuration" targetPath := filepath.Join(root, name) - targetAD := appleDoublePath(targetPath) + targetAD := appledouble.SidecarPath(targetPath) if 
err := os.WriteFile(targetPath, []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) diff --git a/service/afp/catsearch.go b/service/afp/catsearch.go new file mode 100644 index 0000000..aea308e --- /dev/null +++ b/service/afp/catsearch.go @@ -0,0 +1,128 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "encoding/binary" + "path/filepath" + "strings" + + "github.com/pgodw/omnitalk/netlog" +) + +// catSearchMaxDataLen is the maximum bytes of ResultsRecord data per reply. +// Based on one ATP packet: ATPMaxData(578) minus the 21-byte ASP/AFP reply header. +const catSearchMaxDataLen = 500 //557 + +func (s *Service) handleCatSearch(req *FPCatSearchReq) (*FPCatSearchRes, int32) { + if req.ReqMatches <= 0 { + return &FPCatSearchRes{}, ErrParamErr + } + volumeRoot, ok := s.volumeRootByID(req.VolumeID) + if !ok { + return &FPCatSearchRes{}, ErrParamErr + } + searchFS := s.fsForVolume(req.VolumeID) + if searchFS == nil || !searchFS.Capabilities().CatSearch { + return &FPCatSearchRes{}, ErrCallNotSupported + } + query := strings.TrimSpace(req.SearchQuery()) + netlog.Info("[AFP][CatSearch] volume=%d reqMatches=%d reqBitmap=0x%08x paramsLen=%d query=%q", req.VolumeID, req.ReqMatches, req.ReqBitmap, len(req.Parameters), query) + if query == "" { + return &FPCatSearchRes{}, ErrParamErr + } + paths, nextCursor, errCode := searchFS.CatSearch(volumeRoot, query, req.ReqMatches, req.CatalogPosition) + if errCode != NoErr { + return &FPCatSearchRes{}, errCode + } + + fileBitmap := req.FileRsltBitmap + dirBitmap := req.DirectoryRsltBitmap + if fileBitmap == 0 && dirBitmap == 0 { + dirBitmap = DirBitmapLongName | DirBitmapDirID | DirBitmapParentDID + } + + // Decode the incoming cursor to know our starting offset in the backend cache. 
+ incomingOffset := binary.BigEndian.Uint32(req.CatalogPosition[4:8]) + + data := new(bytes.Buffer) + actCount := int32(0) + pathsConsumed := 0 + + for i, absPath := range paths { + if actCount >= req.ReqMatches { + pathsConsumed = i + break + } + info, err := searchFS.Stat(absPath) + if err != nil { + continue + } + if !info.IsDir() { + continue + } + + entryBuf := new(bytes.Buffer) + entryBuf.WriteByte(0) + entryBuf.WriteByte(0x80) + parent := filepath.Dir(absPath) + name := filepath.Base(absPath) + s.packFileInfo(entryBuf, req.VolumeID, dirBitmap, parent, name, info, true) + entry := entryBuf.Bytes() + if len(entry)%2 != 0 { + entryBuf.WriteByte(0) + entry = entryBuf.Bytes() + } + // Per AFP CatSearch ResultsRecord format, StructLength excludes + // the StructLength byte itself and the FileDir byte. + entry[0] = byte(len(entry) - 2) + + if data.Len()+len(entry) > catSearchMaxDataLen { + netlog.Debug("[AFP][CatSearch] stopping at payload cap: entries=%d dataLen=%d nextEntry=%d cap=%d", actCount, data.Len(), len(entry), catSearchMaxDataLen) + pathsConsumed = i + break + } + + data.Write(entry) + actCount++ + pathsConsumed = i + 1 + } + + // Determine the reply cursor. + // If we stopped early due to payload cap, synthesize a continuation cursor so the + // client resumes from the correct offset rather than re-starting the search. + replyCursor := nextCursor + if pathsConsumed < len(paths) { + replyCursor = [16]byte{} + replyCursor[0] = 0x01 // continuation flag + // Carry the query hash from the backend cursor (bytes 1-3). 
+ replyCursor[1] = nextCursor[1] + replyCursor[2] = nextCursor[2] + replyCursor[3] = nextCursor[3] + nextOffset := incomingOffset + uint32(pathsConsumed) + replyCursor[4] = byte(nextOffset >> 24) + replyCursor[5] = byte(nextOffset >> 16) + replyCursor[6] = byte(nextOffset >> 8) + replyCursor[7] = byte(nextOffset) + netlog.Debug("[AFP][CatSearch] payload cap: synthesized continuation cursor offset=%d", nextOffset) + } + + res := &FPCatSearchRes{ + CatalogPosition: replyCursor, + FileRsltBitmap: fileBitmap, + DirectoryRsltBitmap: dirBitmap, + ActualCount: actCount, + Data: data.Bytes(), + } + + // Per AFP spec (matching Netatalk): return ErrEOFErr when this is the last page + // (no more results to follow). Return NoErr only when more pages follow. + if actCount == 0 || replyCursor[0] != 0x01 { + netlog.Debug("[AFP][CatSearch] returning %d results (last page)", actCount) + return res, ErrEOFErr + } + netlog.Debug("[AFP][CatSearch] returning %d results with cursor continuation=true offset=%d", actCount, + binary.BigEndian.Uint32(replyCursor[4:8])) + return res, NoErr +} diff --git a/service/afp/catsearch_test.go b/service/afp/catsearch_test.go new file mode 100644 index 0000000..0200fa1 --- /dev/null +++ b/service/afp/catsearch_test.go @@ -0,0 +1,271 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "io/fs" + "path/filepath" + "strconv" + "strings" + "testing" + "time" +) + +type catSearchDirInfo struct{ name string } + +func (i *catSearchDirInfo) Name() string { return i.name } +func (i *catSearchDirInfo) Size() int64 { return 0 } +func (i *catSearchDirInfo) Mode() fs.FileMode { return fs.ModeDir | 0o755 } +func (i *catSearchDirInfo) ModTime() time.Time { return time.Time{} } +func (i *catSearchDirInfo) IsDir() bool { return true } +func (i *catSearchDirInfo) Sys() any { return nil } + +type catSearchCaptureFS struct { + root string + lastQuery string + paths []string +} + +func (f *catSearchCaptureFS) ReadDir(path string) ([]fs.DirEntry, error) { + 
return nil, nil +} + +func (f *catSearchCaptureFS) Stat(path string) (fs.FileInfo, error) { + clean := filepath.Clean(path) + if clean == filepath.Clean(f.root) { + return &catSearchDirInfo{name: filepath.Base(path)}, nil + } + rel, err := filepath.Rel(filepath.Clean(f.root), clean) + if err == nil && rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)) { + return &catSearchDirInfo{name: filepath.Base(path)}, nil + } + return nil, fs.ErrNotExist +} + +func (f *catSearchCaptureFS) DiskUsage(path string) (uint64, uint64, error) { return 0, 0, nil } +func (f *catSearchCaptureFS) CreateDir(path string) error { return fs.ErrPermission } +func (f *catSearchCaptureFS) CreateFile(path string) (File, error) { return nil, fs.ErrPermission } +func (f *catSearchCaptureFS) OpenFile(path string, flag int) (File, error) { + return nil, fs.ErrPermission +} +func (f *catSearchCaptureFS) Remove(path string) error { return fs.ErrPermission } +func (f *catSearchCaptureFS) Rename(oldpath, newpath string) error { return fs.ErrPermission } + +func (f *catSearchCaptureFS) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{CatSearch: true} +} + +func (f *catSearchCaptureFS) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + return nil, 0, newNotSupported("ReadDirRange") +} + +func (f *catSearchCaptureFS) ChildCount(path string) (uint16, error) { + return 0, newNotSupported("ChildCount") +} + +func (f *catSearchCaptureFS) DirAttributes(path string) (uint16, error) { + return 0, newNotSupported("DirAttributes") +} + +func (f *catSearchCaptureFS) IsReadOnly(path string) (bool, error) { + return false, nil +} + +func (f *catSearchCaptureFS) SupportsCatSearch(path string) (bool, error) { + return true, nil +} + +func (f *catSearchCaptureFS) CatSearch(volumeRoot string, query string, reqMatches int32, cursor [16]byte) ([]string, [16]byte, int32) { + f.lastQuery = query + return append([]string(nil), 
f.paths...), cursor, NoErr +} + +func TestFPCatSearchReq_SearchQuery_ParsesFinderPattern(t *testing.T) { + req := &FPCatSearchReq{Parameters: []byte(". \" clarisworks$ @ \" type:app,game")} + if got := req.SearchQuery(); got != ". \" clarisworks$ @ \" type:app,game" { + t.Fatalf("SearchQuery() = %q, want %q", got, ". \" clarisworks$ @ \" type:app,game") + } +} + +func TestHandleCatSearch_UsesParsedQuery(t *testing.T) { + root := filepath.Clean(t.TempDir()) + captureFS := &catSearchCaptureFS{root: root} + s := NewService("TestServer", []VolumeConfig{{Name: "Garden", Path: root}}, captureFS, nil) + + req := &FPCatSearchReq{ + VolumeID: 1, + ReqMatches: 30, + FileRsltBitmap: FileBitmapParentDID | FileBitmapLongName, + DirectoryRsltBitmap: DirBitmapParentDID | DirBitmapLongName, + ReqBitmap: 0x80000060, + Parameters: []byte(". \" clarisworks$ @ \" type:app,game"), + } + + _, errCode := s.handleCatSearch(req) + if errCode != ErrEOFErr { + t.Fatalf("handleCatSearch errCode=%d, want %d", errCode, ErrEOFErr) + } + if captureFS.lastQuery != ". \" clarisworks$ @ \" type:app,game" { + t.Fatalf("captured query = %q, want %q", captureFS.lastQuery, ". \" clarisworks$ @ \" type:app,game") + } +} + +func TestFPCatSearchReq_String_LogsQueryAndParams(t *testing.T) { + req := &FPCatSearchReq{Parameters: []byte(". \" clarisworks$ @ \" type:app,game")} + s := req.String() + if !bytes.Contains([]byte(s), []byte("Query:\". \\\" clarisworks$ @ \\\" type:app,game\"")) { + t.Fatalf("String() missing parsed Query field: %q", s) + } + if !bytes.Contains([]byte(s), []byte("Params:\". 
\\\" clarisworks$ @ \\\" type:app,game\"")) { + t.Fatalf("String() missing Params field: %q", s) + } +} + +func TestHandleCatSearch_RespectsPayloadCap(t *testing.T) { + root := filepath.Clean(t.TempDir()) + paths := make([]string, 0, 40) + for i := 0; i < 40; i++ { + name := "Spectre Result " + strconv.Itoa(i) + " " + strings.Repeat("X", 24) + paths = append(paths, filepath.Join(root, name)) + } + captureFS := &catSearchCaptureFS{root: root, paths: paths} + s := NewService("TestServer", []VolumeConfig{{Name: "Garden", Path: root}}, captureFS, nil) + + req := &FPCatSearchReq{ + VolumeID: 1, + ReqMatches: 30, + FileRsltBitmap: FileBitmapParentDID | FileBitmapLongName, + DirectoryRsltBitmap: DirBitmapParentDID | DirBitmapLongName, + ReqBitmap: 0x80000060, + Parameters: []byte("* \" spectre$ @ \""), + } + + res, errCode := s.handleCatSearch(req) + // ErrEOFErr is the expected "last page" code when no continuation cursor is set. + if errCode != NoErr && errCode != ErrEOFErr { + t.Fatalf("handleCatSearch errCode=%d, want NoErr or ErrEOFErr", errCode) + } + if res.ActualCount == 0 { + t.Fatalf("ActualCount=%d, want > 0", res.ActualCount) + } + if len(res.Data) > catSearchMaxDataLen { + t.Fatalf("DataLen=%d, want <= %d", len(res.Data), catSearchMaxDataLen) + } + if len(res.Marshal()) >= 578 { + t.Fatalf("MarshalLen=%d, want < 578 to avoid SPErrorBufTooSmall", len(res.Marshal())) + } +} + +func TestMacGardenCatSearch_PaginationCursor(t *testing.T) { + // Test that pagination cursor properly signals continuation + root := filepath.Clean(t.TempDir()) + paths := make([]string, 0, 50) + for i := 0; i < 50; i++ { + name := "Item" + strconv.Itoa(i) + paths = append(paths, filepath.Join(root, name)) + } + captureFS := &catSearchCaptureFS{root: root, paths: paths} + s := NewService("TestServer", []VolumeConfig{{Name: "Garden", Path: root}}, captureFS, nil) + + req := &FPCatSearchReq{ + VolumeID: 1, + ReqMatches: 10, + FileRsltBitmap: FileBitmapParentDID | FileBitmapLongName, + 
DirectoryRsltBitmap: DirBitmapParentDID | DirBitmapLongName, + ReqBitmap: 0x80000060, + Parameters: []byte("test search"), + } + + // First request: should return some results with continuation flag set + res1, errCode1 := s.handleCatSearch(req) + if errCode1 != NoErr && errCode1 != ErrEOFErr { + t.Fatalf("handleCatSearch errCode=%d, want NoErr or ErrEOFErr", errCode1) + } + firstCount := res1.ActualCount + firstCursor := res1.CatalogPosition + + if firstCount == 0 { + t.Fatalf("First request ActualCount=%d, want > 0", firstCount) + } + + // Check if cursor indicates more available + hasMore := firstCursor[0] == 0x01 + if !hasMore { + t.Logf("First request returned %d results with no continuation (all results fit)", firstCount) + // This is OK if all results fit in one response + return + } + + t.Logf("First request returned %d results with continuation flag set", firstCount) + + // Second request: use the cursor to continue + req.CatalogPosition = firstCursor + res2, errCode2 := s.handleCatSearch(req) + if errCode2 != NoErr && errCode2 != ErrEOFErr { + t.Fatalf("Second handleCatSearch errCode=%d, want NoErr or ErrEOFErr", errCode2) + } + + secondCount := res2.ActualCount + if secondCount == 0 && errCode2 != ErrEOFErr { + t.Fatalf("Second request ActualCount=%d but errCode=%d (not ErrEOFErr)", secondCount, errCode2) + } + + t.Logf("Second request returned %d results (total so far: %d)", secondCount, firstCount+secondCount) +} + +func TestHandleCatSearch_ResultsRecordStructLengthIsSpecCompliant(t *testing.T) { + root := filepath.Clean(t.TempDir()) + paths := []string{ + filepath.Join(root, "Spectre 128"), + filepath.Join(root, "Spectre GCR"), + filepath.Join(root, "Spectre 3.0"), + } + captureFS := &catSearchCaptureFS{root: root, paths: paths} + s := NewService("TestServer", []VolumeConfig{{Name: "Garden", Path: root}}, captureFS, nil) + + req := &FPCatSearchReq{ + VolumeID: 1, + ReqMatches: 30, + FileRsltBitmap: FileBitmapParentDID | FileBitmapLongName, + 
DirectoryRsltBitmap: DirBitmapParentDID | DirBitmapLongName, + ReqBitmap: 0x80000060, + Parameters: []byte("spectre"), + } + + res, errCode := s.handleCatSearch(req) + // ErrEOFErr is the expected "last page" code when no continuation cursor is set. + if errCode != NoErr && errCode != ErrEOFErr { + t.Fatalf("handleCatSearch errCode=%d, want NoErr or ErrEOFErr", errCode) + } + if res.ActualCount == 0 { + t.Fatalf("ActualCount=%d, want > 0", res.ActualCount) + } + + // Walk the concatenated ResultsRecord list using spec semantics: + // StructLength excludes StructLength byte + FileDir byte. + off := 0 + records := 0 + for off < len(res.Data) { + if off+2 > len(res.Data) { + t.Fatalf("truncated record header at off=%d len=%d", off, len(res.Data)) + } + structLen := int(res.Data[off]) + recordLen := structLen + 2 + if recordLen < 2 { + t.Fatalf("invalid recordLen=%d at off=%d", recordLen, off) + } + if off+recordLen > len(res.Data) { + t.Fatalf("record overruns payload: off=%d recordLen=%d dataLen=%d", off, recordLen, len(res.Data)) + } + records++ + off += recordLen + } + + if off != len(res.Data) { + t.Fatalf("record walk ended at off=%d, want dataLen=%d", off, len(res.Data)) + } + if records != int(res.ActualCount) { + t.Fatalf("walked records=%d, want ActualCount=%d", records, res.ActualCount) + } +} diff --git a/service/afp/cnid.go b/service/afp/cnid.go index 4cd897a..23e5cd7 100644 --- a/service/afp/cnid.go +++ b/service/afp/cnid.go @@ -1,34 +1,43 @@ +//go:build afp || all + package afp import ( - "database/sql" - "path/filepath" - "strings" - "sync" - - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/cnid" ) +// CNID constants and the Store interface now live in pkg/cnid. These +// aliases preserve the historical AFP-package identifiers so the +// existing fork/directory/volume code keeps compiling unchanged during +// the lift-and-shift. New code should import pkg/cnid directly. 
const ( - CNIDInvalid uint32 = 0 - CNIDParentOfRoot uint32 = 1 - CNIDRoot uint32 = 2 - firstDynamicCNID uint32 = 3 + CNIDInvalid = cnid.Invalid + CNIDParentOfRoot = cnid.ParentOfRoot + CNIDRoot = cnid.Root +) + +type ( + // CNIDStore is the AFP-package alias for cnid.Store. + CNIDStore = cnid.Store + // MemoryCNIDStore is the AFP-package alias for cnid.MemoryStore. + MemoryCNIDStore = cnid.MemoryStore + // SQLiteCNIDStore is the AFP-package alias for cnid.SQLiteStore. + SQLiteCNIDStore = cnid.SQLiteStore ) -// CNIDStore tracks the mapping between AFP catalog node IDs and current paths -// for a single volume. -type CNIDStore interface { - RootID() uint32 - Path(cnid uint32) (string, bool) - CNID(path string) (uint32, bool) - Ensure(path string) uint32 - EnsureReserved(path string, cnid uint32) uint32 - Rebind(oldPath, newPath string) - Remove(path string) +// NewMemoryCNIDStore is the AFP-package alias for cnid.NewMemoryStore. +func NewMemoryCNIDStore() *MemoryCNIDStore { return cnid.NewMemoryStore() } + +// NewSQLiteCNIDStore is the AFP-package alias for cnid.NewSQLiteStore. +func NewSQLiteCNIDStore(volumeRootPath string) (*SQLiteCNIDStore, error) { + return cnid.NewSQLiteStore(volumeRootPath) } -// CNIDBackend creates a per-volume CNID store implementation. +// CNIDBackend creates a per-volume CNID store. The backend abstraction +// stays in service/afp because it is coupled to the AFP Volume type; +// later commits may introduce a pkg/cnid Factory if other services need +// per-volume backend selection. type CNIDBackend interface { Open(volume Volume) CNIDStore } @@ -37,22 +46,22 @@ type CNIDBackend interface { type MemoryCNIDBackend struct{} func (MemoryCNIDBackend) Open(volume Volume) CNIDStore { - return NewMemoryCNIDStore() + return cnid.NewMemoryStore() } // SQLiteCNIDBackend stores CNIDs in a per-volume SQLite database. 
type SQLiteCNIDBackend struct{} func (SQLiteCNIDBackend) Open(volume Volume) CNIDStore { - store, err := NewSQLiteCNIDStore(volume.Config.Path) + store, err := cnid.NewSQLiteStore(volume.Config.Path) if err != nil { netlog.Warn("[AFP][CNID] sqlite init failed for volume=%q path=%q: %v; falling back to memory", volume.Config.Name, volume.Config.Path, err) - return NewMemoryCNIDStore() + return cnid.NewMemoryStore() } return store } -func resolveCNIDBackend(options AFPOptions) CNIDBackend { +func resolveCNIDBackend(options Options) CNIDBackend { if options.CNIDStoreBackend != nil { return options.CNIDStoreBackend } @@ -65,348 +74,3 @@ func resolveCNIDBackend(options AFPOptions) CNIDBackend { return SQLiteCNIDBackend{} } } - -// SQLiteCNIDStore keeps CNIDs in SQLite for persistence across restarts. -type SQLiteCNIDStore struct { - mu sync.Mutex - db *sql.DB -} - -func NewSQLiteCNIDStore(volumeRootPath string) (*SQLiteCNIDStore, error) { - db, err := openSQLiteDB(volumeRootPath) - if err != nil { - return nil, err - } - store := &SQLiteCNIDStore{db: db} - if err := store.initSchema(); err != nil { - db.Close() - return nil, err - } - return store, nil -} - -func (s *SQLiteCNIDStore) initSchema() error { - _, err := s.db.Exec(` - CREATE TABLE IF NOT EXISTS cnid_paths ( - cnid INTEGER PRIMARY KEY, - path TEXT NOT NULL UNIQUE - ); - CREATE INDEX IF NOT EXISTS idx_cnid_paths_path ON cnid_paths(path); - `) - return err -} - -func (s *SQLiteCNIDStore) RootID() uint32 { return CNIDRoot } - -func (s *SQLiteCNIDStore) Path(cnid uint32) (string, bool) { - var path string - err := s.db.QueryRow("SELECT path FROM cnid_paths WHERE cnid = ?", cnid).Scan(&path) - if err != nil { - return "", false - } - return path, true -} - -func (s *SQLiteCNIDStore) CNID(path string) (uint32, bool) { - path = filepath.Clean(path) - var cnid uint32 - err := s.db.QueryRow("SELECT cnid FROM cnid_paths WHERE path = ?", path).Scan(&cnid) - if err != nil { - return 0, false - } - return cnid, true -} - 
-func (s *SQLiteCNIDStore) Ensure(path string) uint32 { - path = filepath.Clean(path) - - s.mu.Lock() - defer s.mu.Unlock() - - tx, err := s.db.Begin() - if err != nil { - return CNIDInvalid - } - defer tx.Rollback() - - if cnid, ok := selectCNIDByPathTx(tx, path); ok { - _ = tx.Commit() - return cnid - } - - cnid, err := nextAvailableCNIDTx(tx) - if err != nil { - return CNIDInvalid - } - if _, err := tx.Exec("INSERT INTO cnid_paths(cnid, path) VALUES(?, ?)", cnid, path); err != nil { - return CNIDInvalid - } - if err := tx.Commit(); err != nil { - return CNIDInvalid - } - return cnid -} - -func (s *SQLiteCNIDStore) EnsureReserved(path string, cnid uint32) uint32 { - path = filepath.Clean(path) - - s.mu.Lock() - defer s.mu.Unlock() - - tx, err := s.db.Begin() - if err != nil { - return CNIDInvalid - } - defer tx.Rollback() - - if existing, ok := selectCNIDByPathTx(tx, path); ok { - _ = tx.Commit() - return existing - } - - if existingPath, ok := selectPathByCNIDTx(tx, cnid); ok && existingPath != path { - if _, err := tx.Exec("DELETE FROM cnid_paths WHERE cnid = ?", cnid); err != nil { - return CNIDInvalid - } - } - - if _, err := tx.Exec("INSERT INTO cnid_paths(cnid, path) VALUES(?, ?)", cnid, path); err != nil { - return CNIDInvalid - } - if err := tx.Commit(); err != nil { - return CNIDInvalid - } - return cnid -} - -func (s *SQLiteCNIDStore) Rebind(oldPath, newPath string) { - oldPath = filepath.Clean(oldPath) - newPath = filepath.Clean(newPath) - prefix := oldPath + string(filepath.Separator) - - s.mu.Lock() - defer s.mu.Unlock() - - tx, err := s.db.Begin() - if err != nil { - return - } - defer tx.Rollback() - - rows, err := tx.Query("SELECT cnid, path FROM cnid_paths") - if err != nil { - return - } - defer rows.Close() - - type row struct { - cnid uint32 - path string - } - updates := make([]row, 0) - for rows.Next() { - var r row - if err := rows.Scan(&r.cnid, &r.path); err != nil { - return - } - if r.path != oldPath && !strings.HasPrefix(r.path, prefix) 
{ - continue - } - updates = append(updates, r) - } - for _, r := range updates { - suffix := strings.TrimPrefix(r.path, oldPath) - mapped := filepath.Clean(newPath + suffix) - if _, err := tx.Exec("UPDATE cnid_paths SET path = ? WHERE cnid = ?", mapped, r.cnid); err != nil { - return - } - } - _ = tx.Commit() -} - -func (s *SQLiteCNIDStore) Remove(path string) { - path = filepath.Clean(path) - prefix := path + string(filepath.Separator) - - s.mu.Lock() - defer s.mu.Unlock() - - tx, err := s.db.Begin() - if err != nil { - return - } - defer tx.Rollback() - - rows, err := tx.Query("SELECT cnid, path FROM cnid_paths") - if err != nil { - return - } - defer rows.Close() - - toDelete := make([]uint32, 0) - for rows.Next() { - var cnid uint32 - var current string - if err := rows.Scan(&cnid, ¤t); err != nil { - return - } - if current == path || strings.HasPrefix(current, prefix) { - toDelete = append(toDelete, cnid) - } - } - for _, cnid := range toDelete { - if _, err := tx.Exec("DELETE FROM cnid_paths WHERE cnid = ?", cnid); err != nil { - return - } - } - _ = tx.Commit() -} - -func selectCNIDByPathTx(tx *sql.Tx, path string) (uint32, bool) { - var cnid uint32 - err := tx.QueryRow("SELECT cnid FROM cnid_paths WHERE path = ?", path).Scan(&cnid) - if err != nil { - return 0, false - } - return cnid, true -} - -func selectPathByCNIDTx(tx *sql.Tx, cnid uint32) (string, bool) { - var path string - err := tx.QueryRow("SELECT path FROM cnid_paths WHERE cnid = ?", cnid).Scan(&path) - if err != nil { - return "", false - } - return path, true -} - -func nextAvailableCNIDTx(tx *sql.Tx) (uint32, error) { - var maxCNID uint32 - if err := tx.QueryRow("SELECT COALESCE(MAX(cnid), 0) FROM cnid_paths").Scan(&maxCNID); err != nil { - return 0, err - } - if maxCNID < firstDynamicCNID-1 { - return firstDynamicCNID, nil - } - return maxCNID + 1, nil -} - -// MemoryCNIDStore keeps CNIDs in-memory for the lifetime of the AFP service. 
-type MemoryCNIDStore struct { - mu sync.RWMutex - cnidToPath map[uint32]string - pathToCNID map[string]uint32 - nextCNID uint32 -} - -func NewMemoryCNIDStore() *MemoryCNIDStore { - return &MemoryCNIDStore{ - cnidToPath: make(map[uint32]string), - pathToCNID: make(map[string]uint32), - nextCNID: firstDynamicCNID, - } -} - -func (s *MemoryCNIDStore) RootID() uint32 { return CNIDRoot } - -func (s *MemoryCNIDStore) Path(cnid uint32) (string, bool) { - s.mu.RLock() - defer s.mu.RUnlock() - path, ok := s.cnidToPath[cnid] - return path, ok -} - -func (s *MemoryCNIDStore) CNID(path string) (uint32, bool) { - s.mu.RLock() - defer s.mu.RUnlock() - cnid, ok := s.pathToCNID[filepath.Clean(path)] - return cnid, ok -} - -func (s *MemoryCNIDStore) Ensure(path string) uint32 { - path = filepath.Clean(path) - - s.mu.Lock() - defer s.mu.Unlock() - - if cnid, ok := s.pathToCNID[path]; ok { - return cnid - } - - cnid := s.nextAvailableCNIDLocked() - s.cnidToPath[cnid] = path - s.pathToCNID[path] = cnid - return cnid -} - -func (s *MemoryCNIDStore) EnsureReserved(path string, cnid uint32) uint32 { - path = filepath.Clean(path) - - s.mu.Lock() - defer s.mu.Unlock() - - if existing, ok := s.pathToCNID[path]; ok { - return existing - } - if existingPath, ok := s.cnidToPath[cnid]; ok && existingPath != path { - delete(s.pathToCNID, existingPath) - } - - s.cnidToPath[cnid] = path - s.pathToCNID[path] = cnid - if cnid >= s.nextCNID { - s.nextCNID = cnid + 1 - if s.nextCNID < firstDynamicCNID { - s.nextCNID = firstDynamicCNID - } - } - return cnid -} - -func (s *MemoryCNIDStore) Rebind(oldPath, newPath string) { - oldPath = filepath.Clean(oldPath) - newPath = filepath.Clean(newPath) - prefix := oldPath + string(filepath.Separator) - - s.mu.Lock() - defer s.mu.Unlock() - - for cnid, path := range s.cnidToPath { - if path != oldPath && !strings.HasPrefix(path, prefix) { - continue - } - suffix := strings.TrimPrefix(path, oldPath) - mapped := filepath.Clean(newPath + suffix) - 
delete(s.pathToCNID, path) - s.cnidToPath[cnid] = mapped - s.pathToCNID[mapped] = cnid - } -} - -func (s *MemoryCNIDStore) Remove(path string) { - path = filepath.Clean(path) - prefix := path + string(filepath.Separator) - - s.mu.Lock() - defer s.mu.Unlock() - - for cnid, current := range s.cnidToPath { - if current == path || strings.HasPrefix(current, prefix) { - delete(s.cnidToPath, cnid) - delete(s.pathToCNID, current) - } - } -} - -func (s *MemoryCNIDStore) nextAvailableCNIDLocked() uint32 { - for { - cnid := s.nextCNID - s.nextCNID++ - if cnid < firstDynamicCNID { - continue - } - if _, exists := s.cnidToPath[cnid]; !exists { - return cnid - } - } -} diff --git a/service/afp/config.go b/service/afp/config.go index 31c18be..5c9cd0d 100644 --- a/service/afp/config.go +++ b/service/afp/config.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -5,19 +7,149 @@ import ( "strings" ) +const ( + FSTypeLocalFS = "local_fs" + FSTypeMacGarden = "macgarden" +) + +// Config is AFP's user-facing configuration. It is populated by koanf +// (or any source) before being handed to NewService. Runtime objects +// like transports, FileSystem, and ExtensionMap are constructor args, +// not config. +type Config struct { + Enabled bool `koanf:"enabled"` + Name string `koanf:"name"` + Zone string `koanf:"zone"` + // Protocols is a comma-separated list: "tcp", "ddp", or "tcp,ddp". + Protocols string `koanf:"protocols"` + // Binding is the AFP-over-TCP listen address (e.g. ":548"). + Binding string `koanf:"binding"` + // ExtensionMap is the path to a netatalk-style type/creator file. + // Resolved by the caller against the config-file directory if relative. 
+ ExtensionMap string `koanf:"extension_map"` + UseDecomposedNames bool `koanf:"use_decomposed_names"` + CNIDBackend string `koanf:"cnid_backend"` + DesktopBackend string `koanf:"desktop_backend"` + AppleDoubleMode string `koanf:"appledouble_mode"` + PersistentVolumeIDs bool `koanf:"persistent_volume_ids"` + + // Volumes is a name-keyed map; the key is used as the default volume + // Name when the section omits one. + Volumes map[string]VolumeConfig `koanf:"volumes"` +} + +// DefaultConfig returns AFP's built-in defaults. These are also used as +// the seed values for koanf unmarshalling so unset keys keep their +// defaults rather than being zeroed. +func DefaultConfig() Config { + return Config{ + Enabled: true, + Name: "Go File Server", + Protocols: "tcp,ddp", + Binding: ":548", + UseDecomposedNames: true, + CNIDBackend: "sqlite", + DesktopBackend: "sqlite", + AppleDoubleMode: string(defaultAppleDoubleMode), + PersistentVolumeIDs: true, + } +} + +// Validate checks the config for logical consistency. Syntactic decoding +// errors are caught earlier by the unmarshaller; this method enforces +// rules that the type system can't express. +func (c *Config) Validate() error { + if !c.Enabled { + return nil + } + if strings.TrimSpace(c.Name) == "" { + return fmt.Errorf("AFP.name must not be empty") + } + for _, p := range strings.Split(c.Protocols, ",") { + p = strings.TrimSpace(strings.ToLower(p)) + switch p { + case "", "tcp", "ddp": + default: + return fmt.Errorf("AFP.protocols entry %q must be tcp or ddp", p) + } + } + if _, err := ParseAppleDoubleMode(c.AppleDoubleMode); err != nil { + return fmt.Errorf("AFP.%w", err) + } + for key, v := range c.Volumes { + section := "AFP.volumes." 
+ key + fsType, err := NormalizeFSType(v.FSType) + if err != nil { + return fmt.Errorf("[%s] %w", section, err) + } + if strings.TrimSpace(v.Path) == "" && fsType != FSTypeMacGarden { + return fmt.Errorf("[%s] path is required", section) + } + if v.AppleDoubleMode != "" { + if _, err := ParseAppleDoubleMode(string(v.AppleDoubleMode)); err != nil { + return fmt.Errorf("[%s] %w", section, err) + } + } + } + return nil +} + +// ResolvedVolumes returns Volumes as a flat slice, with map keys folded +// into Name where the section did not set one and FSType normalized. +// MacGarden volumes without a path get a default derived from Name. +func (c *Config) ResolvedVolumes() ([]VolumeConfig, error) { + out := make([]VolumeConfig, 0, len(c.Volumes)) + for key, v := range c.Volumes { + if strings.TrimSpace(v.Name) == "" { + v.Name = key + } + fsType, err := NormalizeFSType(v.FSType) + if err != nil { + return nil, fmt.Errorf("[AFP.volumes.%s] %w", key, err) + } + v.FSType = fsType + if strings.TrimSpace(v.Path) == "" && fsType == FSTypeMacGarden { + v.Path = DefaultMacGardenVolumePath(v.Name) + } + if v.AppleDoubleMode != "" { + mode, err := ParseAppleDoubleMode(string(v.AppleDoubleMode)) + if err != nil { + return nil, fmt.Errorf("[AFP.volumes.%s] %w", key, err) + } + v.AppleDoubleMode = mode + } + out = append(out, v) + } + return out, nil +} + // VolumeConfig holds the configuration for a single AFP-shared volume. 
type VolumeConfig struct { - Name string - Path string - Password string - ReadOnly bool - RebuildDesktopDB bool - AppleDoubleMode AppleDoubleMode // per-volume override; empty means inherit from AFPOptions + Name string `koanf:"name"` + Path string `koanf:"path"` + FSType string `koanf:"fs_type"` + Password string `koanf:"password"` + ReadOnly bool `koanf:"read_only"` + RebuildDesktopDB bool `koanf:"rebuild_desktop_db"` + AppleDoubleMode AppleDoubleMode `koanf:"appledouble_mode"` +} + +func NormalizeFSType(s string) (string, error) { + v := strings.ToLower(strings.TrimSpace(s)) + if v == "" { + return FSTypeLocalFS, nil + } + fsRegistryMu.RLock() + _, ok := fsRegistry[v] + fsRegistryMu.RUnlock() + if !ok { + return "", fmt.Errorf("invalid fs_type %q (registered: %v)", s, registeredFSNames()) + } + return v, nil } // ParseVolumeFlag parses an -afp-volume flag value of the form "Name:Path". // The name may contain spaces; the first colon separates name from path. -// Example: "Mac Share:c:\mac" or "Mac Stuff:/media/mac/classic" func ParseVolumeFlag(s string) (VolumeConfig, error) { idx := strings.Index(s, ":") if idx < 1 { @@ -28,5 +160,5 @@ func ParseVolumeFlag(s string) (VolumeConfig, error) { if path == "" { return VolumeConfig{}, fmt.Errorf("invalid -afp-volume %q: path is empty", s) } - return VolumeConfig{Name: name, Path: path}, nil + return VolumeConfig{Name: name, Path: path, FSType: FSTypeLocalFS}, nil } diff --git a/service/afp/config_test.go b/service/afp/config_test.go index 087735f..f53db81 100644 --- a/service/afp/config_test.go +++ b/service/afp/config_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import "testing" diff --git a/service/afp/desktop.go b/service/afp/desktop.go index 327bbf7..13d0179 100644 --- a/service/afp/desktop.go +++ b/service/afp/desktop.go @@ -1,20 +1,21 @@ +//go:build afp || all + package afp import ( "bytes" + "errors" + "io/fs" "path/filepath" - "github.com/pgodw/omnitalk/go/netlog" + 
"github.com/pgodw/omnitalk/netlog" ) -// getDesktopDB looks up the DesktopDB associated with a DTRefNum. -// Must be called with s.mu held (at least RLock). -func (s *AFPService) getDesktopDB(dtRefNum uint16) (DesktopDB, bool) { - volID, ok := s.dtRefs[dtRefNum] - if !ok { - return nil, false - } - db, ok := s.desktopDBs[volID] +// getDesktopDB looks up the DesktopDB associated with a DTRefNum. The +// returned bool is false when either the ref number is unknown or the +// underlying DesktopDB was never opened. +func (s *Service) getDesktopDB(dtRefNum uint16) (DesktopDB, bool) { + db, _, ok := s.desktop.lookupDB(dtRefNum) return db, ok } @@ -30,7 +31,7 @@ func volRelPath(volumeRoot, absPath string) string { // handleOpenDT opens the Desktop database for a volume. // It creates the .AppleDesktop directory (for SMB client compatibility) and // opens or initialises the .desktop.db cache for AFP desktop operations. -func (s *AFPService) handleOpenDT(req *FPOpenDTReq) (*FPOpenDTRes, int32) { +func (s *Service) handleOpenDT(req *FPOpenDTReq) (*FPOpenDTRes, int32) { root, ok := s.volumeRootByID(req.VolID) if !ok { return &FPOpenDTRes{}, ErrParamErr @@ -39,50 +40,44 @@ func (s *AFPService) handleOpenDT(req *FPOpenDTReq) (*FPOpenDTRes, int32) { // Keep .AppleDesktop directory for SMB client compatibility — macOS writes // its own Desktop DB / Desktop DF files into this directory. 
dtDir := filepath.Join(root, ".AppleDesktop") - if _, err := s.fs.Stat(dtDir); err != nil { - if err2 := s.fs.CreateDir(dtDir); err2 != nil { - if _, err3 := s.fs.Stat(dtDir); err3 != nil { - return &FPOpenDTRes{}, ErrMiscErr + backend := s.fsForVolume(req.VolID) + if backend == nil { + return &FPOpenDTRes{}, ErrParamErr + } + if _, err := backend.Stat(dtDir); err != nil { + if err2 := backend.CreateDir(dtDir); err2 != nil { + if errors.Is(err2, fs.ErrPermission) || isNotSupported(err2) || s.volumeIsReadOnly(req.VolID) { + netlog.Debug("[AFP][Desktop] skipping .AppleDesktop creation for volume=%d dir=%q: %v", req.VolID, dtDir, err2) + } else { + if _, err3 := backend.Stat(dtDir); err3 != nil { + return &FPOpenDTRes{}, ErrMiscErr + } } } } - s.mu.Lock() - defer s.mu.Unlock() - - // Lazily open the .desktop.db for this volume. - if _, loaded := s.desktopDBs[req.VolID]; !loaded { - volume, vok := s.volumeByID(req.VolID) - if !vok { - return &FPOpenDTRes{}, ErrParamErr - } - s.desktopDBs[req.VolID] = s.desktopDB.Open(volume) + volume, vok := s.volumeByID(req.VolID) + if !vok { + return &FPOpenDTRes{}, ErrParamErr } - dtRef := s.nextDTRef - s.nextDTRef++ - s.dtRefs[dtRef] = req.VolID - + dtRef := s.desktop.openRef(req.VolID, func() DesktopDB { + return s.desktopDB.Open(volume) + }) return &FPOpenDTRes{DTRefNum: dtRef}, NoErr } // handleCloseDT invalidates a Desktop database reference number. -func (s *AFPService) handleCloseDT(req *FPCloseDTReq) (*FPCloseDTRes, int32) { - s.mu.Lock() - defer s.mu.Unlock() - if _, ok := s.dtRefs[req.DTRefNum]; !ok { +func (s *Service) handleCloseDT(req *FPCloseDTReq) (*FPCloseDTRes, int32) { + if !s.desktop.closeRef(req.DTRefNum) { return &FPCloseDTRes{}, ErrParamErr } - delete(s.dtRefs, req.DTRefNum) return &FPCloseDTRes{}, NoErr } // handleAddIcon stores an icon bitmap in the Desktop database. 
-func (s *AFPService) handleAddIcon(req *FPAddIconReq) (*FPAddIconRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - volID, _ := s.dtRefs[req.DTRefNum] - s.mu.RUnlock() +func (s *Service) handleAddIcon(req *FPAddIconReq) (*FPAddIconRes, int32) { + db, volID, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { netlog.Debug("[AFP][Desktop] FPAddIcon dtRef=%d creator=%q type=%q itype=%d tag=%d size=%d -> ErrParamErr (no desktop db)", req.DTRefNum, string(req.Creator[:]), string(req.Type[:]), req.IType, req.Tag, req.Size) return &FPAddIconRes{}, ErrParamErr @@ -105,10 +100,8 @@ func (s *AFPService) handleAddIcon(req *FPAddIconReq) (*FPAddIconRes, int32) { } // handleGetIcon retrieves an icon bitmap from the Desktop database. -func (s *AFPService) handleGetIcon(req *FPGetIconReq) (*FPGetIconRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - s.mu.RUnlock() +func (s *Service) handleGetIcon(req *FPGetIconReq) (*FPGetIconRes, int32) { + db, _, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { netlog.Debug("[AFP][Desktop] FPGetIcon dtRef=%d creator=%q type=%q itype=%d size=%d -> ErrParamErr (no desktop db)", req.DTRefNum, string(req.Creator[:]), string(req.Type[:]), req.IType, req.Size) return &FPGetIconRes{}, ErrParamErr @@ -119,7 +112,7 @@ func (s *AFPService) handleGetIcon(req *FPGetIconReq) (*FPGetIconRes, int32) { // creator and ingest icons from each app's AppleDouble resource fork. // Bounded by the number of registered apps for the creator — never // rebuilds the whole volume. - volID, vok := s.dtRefs[req.DTRefNum] + volID, vok := s.desktop.volumeOf(req.DTRefNum) if vok { s.ingestAppleDoubleIconsForCreator(volID, db, req.Creator) entry, found = db.GetIcon(req.Creator, req.Type, req.IType) @@ -144,10 +137,8 @@ func (s *AFPService) handleGetIcon(req *FPGetIconReq) (*FPGetIconRes, int32) { } // handleGetIconInfo retrieves icon metadata by 1-based index for a given creator. 
-func (s *AFPService) handleGetIconInfo(req *FPGetIconInfoReq) (*FPGetIconInfoRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - s.mu.RUnlock() +func (s *Service) handleGetIconInfo(req *FPGetIconInfoReq) (*FPGetIconInfoRes, int32) { + db, _, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { netlog.Debug("[AFP][Desktop] FPGetIconInfo dtRef=%d creator=%q index=%d -> ErrParamErr (no desktop db)", req.DTRefNum, string(req.Creator[:]), req.IconIndex) return &FPGetIconInfoRes{}, ErrParamErr @@ -174,11 +165,8 @@ func (s *AFPService) handleGetIconInfo(req *FPGetIconInfoReq) (*FPGetIconInfoRes } // handleAddAPPL registers an APPL mapping in the Desktop database. -func (s *AFPService) handleAddAPPL(req *FPAddAPPLReq) (*FPAddAPPLRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - volID, _ := s.dtRefs[req.DTRefNum] - s.mu.RUnlock() +func (s *Service) handleAddAPPL(req *FPAddAPPLReq) (*FPAddAPPLRes, int32) { + db, volID, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { netlog.Debug("[AFP][Desktop] FPAddAPPL dtRef=%d creator=%q dirID=%d tag=%d path=%q -> ErrParamErr (no desktop db)", req.DTRefNum, string(req.Creator[:]), req.DirID, req.Tag, req.Path) return &FPAddAPPLRes{}, ErrParamErr @@ -218,11 +206,8 @@ func (s *AFPService) handleAddAPPL(req *FPAddAPPLReq) (*FPAddAPPLRes, int32) { } // handleRemoveAPPL removes an APPL mapping from the Desktop database. -func (s *AFPService) handleRemoveAPPL(req *FPRemoveAPPLReq) (*FPRemoveAPPLRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - volID, _ := s.dtRefs[req.DTRefNum] - s.mu.RUnlock() +func (s *Service) handleRemoveAPPL(req *FPRemoveAPPLReq) (*FPRemoveAPPLRes, int32) { + db, volID, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { return &FPRemoveAPPLRes{}, ErrParamErr } @@ -236,11 +221,8 @@ func (s *AFPService) handleRemoveAPPL(req *FPRemoveAPPLReq) (*FPRemoveAPPLRes, i } // handleGetAPPL retrieves an APPL mapping by 0-based index and returns file parameters. 
-func (s *AFPService) handleGetAPPL(req *FPGetAPPLReq) (*FPGetAPPLRes, int32) { - s.mu.RLock() - db, ok := s.getDesktopDB(req.DTRefNum) - volID, _ := s.dtRefs[req.DTRefNum] - s.mu.RUnlock() +func (s *Service) handleGetAPPL(req *FPGetAPPLReq) (*FPGetAPPLRes, int32) { + db, volID, ok := s.desktop.lookupDB(req.DTRefNum) if !ok { netlog.Debug("[AFP][Desktop] FPGetAPPL dtRef=%d creator=%q index=%d bitmap=0x%04x -> ErrParamErr (no desktop db)", req.DTRefNum, string(req.Creator[:]), req.APPLIndex, req.Bitmap) return emptyGetAPPLRes(req), ErrParamErr @@ -291,11 +273,8 @@ func emptyGetAPPLRes(req *FPGetAPPLReq) *FPGetAPPLRes { // handleAddComment stores a Finder comment in the AppleDouble sidecar (preferred) // or in the Desktop database (fallback when no CommentBackend is available). -func (s *AFPService) handleAddComment(req *FPAddCommentReq) (*FPAddCommentRes, int32) { - s.mu.RLock() - volID, volOK := s.dtRefs[req.DTRefNum] - db, _ := s.getDesktopDB(req.DTRefNum) - s.mu.RUnlock() +func (s *Service) handleAddComment(req *FPAddCommentReq) (*FPAddCommentRes, int32) { + db, volID, volOK := s.desktop.lookup(req.DTRefNum) if !volOK { return &FPAddCommentRes{}, ErrParamErr } @@ -333,11 +312,8 @@ func (s *AFPService) handleAddComment(req *FPAddCommentReq) (*FPAddCommentRes, i // handleRemoveComment removes a Finder comment from the AppleDouble sidecar (preferred) // or from the Desktop database (fallback). 
-func (s *AFPService) handleRemoveComment(req *FPRemoveCommentReq) (*FPRemoveCommentRes, int32) { - s.mu.RLock() - volID, volOK := s.dtRefs[req.DTRefNum] - db, _ := s.getDesktopDB(req.DTRefNum) - s.mu.RUnlock() +func (s *Service) handleRemoveComment(req *FPRemoveCommentReq) (*FPRemoveCommentRes, int32) { + db, volID, volOK := s.desktop.lookup(req.DTRefNum) if !volOK { return &FPRemoveCommentRes{}, ErrParamErr } @@ -375,11 +351,8 @@ func (s *AFPService) handleRemoveComment(req *FPRemoveCommentReq) (*FPRemoveComm // handleGetComment retrieves a Finder comment from the AppleDouble sidecar (preferred) // or from the Desktop database (fallback). -func (s *AFPService) handleGetComment(req *FPGetCommentReq) (*FPGetCommentRes, int32) { - s.mu.RLock() - volID, volOK := s.dtRefs[req.DTRefNum] - db, _ := s.getDesktopDB(req.DTRefNum) - s.mu.RUnlock() +func (s *Service) handleGetComment(req *FPGetCommentReq) (*FPGetCommentRes, int32) { + db, volID, volOK := s.desktop.lookup(req.DTRefNum) if !volOK { return &FPGetCommentRes{}, ErrParamErr } @@ -413,3 +386,11 @@ func (s *AFPService) handleGetComment(req *FPGetCommentReq) (*FPGetCommentRes, i } return &FPGetCommentRes{Comment: []byte(comment)}, NoErr } + +func (s *Service) spawnDesktopRebuild() { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.rebuildDesktopDBsIfConfigured() + }() +} diff --git a/service/afp/desktop_models.go b/service/afp/desktop_models.go index c94122d..160a0d5 100644 --- a/service/afp/desktop_models.go +++ b/service/afp/desktop_models.go @@ -1,8 +1,12 @@ +//go:build afp || all + package afp import ( "encoding/binary" "fmt" + + "github.com/pgodw/omnitalk/pkg/binutil" ) // FPOpenDT - open the Desktop Database for a volume. 
@@ -25,10 +29,16 @@ type FPOpenDTRes struct { DTRefNum uint16 } +func (res *FPOpenDTRes) WireSize() int { return 2 } + +func (res *FPOpenDTRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU16(b, res.DTRefNum) +} + func (res *FPOpenDTRes) Marshal() []byte { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, res.DTRefNum) - return buf + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPOpenDTRes) String() string { @@ -258,12 +268,31 @@ type FPGetAPPLRes struct { Data []byte } +func (res *FPGetAPPLRes) WireSize() int { return 6 + len(res.Data) } + +func (res *FPGetAPPLRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU32(b[off:], res.APPLTag) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPGetAPPLRes) Marshal() []byte { - buf := make([]byte, 6+len(res.Data)) - binary.BigEndian.PutUint16(buf[0:2], res.Bitmap) - binary.BigEndian.PutUint32(buf[2:6], res.APPLTag) - copy(buf[6:], res.Data) - return buf + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetAPPLRes) String() string { @@ -383,14 +412,30 @@ type FPGetCommentRes struct { Comment []byte } -func (res *FPGetCommentRes) Marshal() []byte { - if len(res.Comment) > 128 { - res.Comment = res.Comment[:128] +func (res *FPGetCommentRes) commentLen() int { + n := len(res.Comment) + if n > 128 { + n = 128 } - out := make([]byte, 1+len(res.Comment)) - out[0] = byte(len(res.Comment)) - copy(out[1:], res.Comment) - return out + return n +} + +func (res *FPGetCommentRes) WireSize() int { return 1 + res.commentLen() } + +func (res *FPGetCommentRes) MarshalWire(b []byte) (int, error) { + clen := res.commentLen() + if len(b) < 1+clen { + return 0, binutil.ErrShortBuffer + 
} + b[0] = byte(clen) + copy(b[1:], res.Comment[:clen]) + return 1 + clen, nil +} + +func (res *FPGetCommentRes) Marshal() []byte { + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetCommentRes) String() string { diff --git a/service/afp/desktop_models_golden_test.go b/service/afp/desktop_models_golden_test.go new file mode 100644 index 0000000..bf01d89 --- /dev/null +++ b/service/afp/desktop_models_golden_test.go @@ -0,0 +1,42 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "testing" +) + +func TestFPOpenDTRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPOpenDTRes{DTRefNum: 0xCAFE} + got := res.Marshal() + want := goldenBytes(t, "fpopendtres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetAPPLRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetAPPLRes{ + Bitmap: 0x07FB, + APPLTag: 0xDEADBEEF, + Data: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetapplres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetCommentRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetCommentRes{Comment: []byte("Hello, comment!")} + got := res.Marshal() + want := goldenBytes(t, "fpgetcommentres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/desktop_rebuild.go b/service/afp/desktop_rebuild.go index f5adea1..870ebf8 100644 --- a/service/afp/desktop_rebuild.go +++ b/service/afp/desktop_rebuild.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp // Desktop database rebuild / ingest support. 
Populates the in-memory and @@ -12,7 +14,8 @@ import ( "path/filepath" "strings" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/appledouble" ) // EnableAppleDoubleIconFallback controls whether FPGetIcon misses trigger a @@ -21,24 +24,22 @@ import ( // enabling it costs a one-time O(N) scan per volume on first icon miss. const EnableAppleDoubleIconFallback = true -// volumeRootByIDLocked is the lock-free helper used from ingest paths that -// already hold s.mu. -func (s *AFPService) desktopDBForVolumeLocked(volID uint16) DesktopDB { - if db, ok := s.desktopDBs[volID]; ok { - return db - } +// desktopDBForVolume returns the per-volume DesktopDB, opening it lazily on +// first use. Safe to call from ingest paths without holding any external +// lock — desktopState provides its own synchronisation. +func (s *Service) desktopDBForVolume(volID uint16) DesktopDB { volume, ok := s.volumeByID(volID) if !ok { return nil } - db := s.desktopDB.Open(volume) - s.desktopDBs[volID] = db - return db + return s.desktop.dbForVolume(volID, func() DesktopDB { + return s.desktopDB.Open(volume) + }) } // appleDoubleOwnerPath normalizes a host file path or AppleDouble sidecar path // to the logical host file path the metadata backend expects. -func (s *AFPService) appleDoubleOwnerPath(filePath string) string { +func (s *Service) appleDoubleOwnerPath(filePath string) string { m := s.metaForPath(filePath) if backend, ok := m.(*AppleDoubleBackend); ok { return backend.ownerPath(filePath) @@ -48,7 +49,7 @@ func (s *AFPService) appleDoubleOwnerPath(filePath string) string { // appleDoubleMetadataPath returns the sidecar path for filePath using the // MetadataPath method on the metadata backend. Returns "" if no backend is configured. 
-func (s *AFPService) appleDoubleMetadataPath(filePath string) string { +func (s *Service) appleDoubleMetadataPath(filePath string) string { filePath = s.appleDoubleOwnerPath(filePath) m := s.metaForPath(filePath) if m == nil { @@ -71,7 +72,7 @@ func (s *AFPService) appleDoubleMetadataPath(filePath string) string { // the well-known resource ID -16455 (kCustomIconResource). // // Returns the number of icons added. -func (s *AFPService) IngestAppleDoubleIcons(volID uint16, filePath string) int { +func (s *Service) IngestAppleDoubleIcons(volID uint16, filePath string) int { filePath = s.appleDoubleOwnerPath(filePath) adPath := s.appleDoubleMetadataPath(filePath) if adPath == "" { @@ -81,28 +82,28 @@ func (s *AFPService) IngestAppleDoubleIcons(volID uint16, filePath string) int { if err != nil { return 0 } - ad, err := parseAppleDoubleBytes(raw) + ad, err := appledouble.Parse(raw) if err != nil { return 0 } - isAPPL := ad.hasFinder && ad.finderInfo[0] == 'A' && ad.finderInfo[1] == 'P' && ad.finderInfo[2] == 'P' && ad.finderInfo[3] == 'L' + isAPPL := ad.HasFinder && ad.FinderInfo[0] == 'A' && ad.FinderInfo[1] == 'P' && ad.FinderInfo[2] == 'P' && ad.FinderInfo[3] == 'L' isIconFile := isIconFile(filepath.Base(filePath)) var icons []extractedIcon // For APPL files, the AppleDouble embedded icon entry is ignored — the // authoritative app icon lives in the resource fork's ID-128 icon family. - if !isAPPL && !isIconFile && ad.hasIconBW && len(ad.iconBW) > 0 && ad.hasFinder { - if icon, ok := iconFromAppleDoubleEntry(ad.finderInfo, ad.iconBW); ok { + if !isAPPL && !isIconFile && ad.HasIconBW && len(ad.IconBW) > 0 && ad.HasFinder { + if icon, ok := iconFromAppleDoubleEntry(ad.FinderInfo, ad.IconBW); ok { icons = append(icons, icon) } } - if ad.hasRsrc && len(ad.rsrc) > 0 { - icons = append(icons, extractIconsFromResourceFork(ad.rsrc)...) + if ad.HasResource && len(ad.Resource) > 0 { + icons = append(icons, extractIconsFromResourceFork(ad.Resource)...) 
if isAPPL { var creator [4]byte - copy(creator[:], ad.finderInfo[4:8]) - icons = append(icons, extractAppIconFromResourceFork(ad.rsrc, creator)...) + copy(creator[:], ad.FinderInfo[4:8]) + icons = append(icons, extractAppIconFromResourceFork(ad.Resource, creator)...) } if isIconFile { // Icon\r files store custom folder icons at resource ID -16455. @@ -111,16 +112,14 @@ func (s *AFPService) IngestAppleDoubleIcons(volID uint16, filePath string) int { var creator, fileType [4]byte copy(creator[:], "MACS") copy(fileType[:], "fldr") - icons = append(icons, extractCustomIconFromResourceFork(ad.rsrc, creator, fileType)...) + icons = append(icons, extractCustomIconFromResourceFork(ad.Resource, creator, fileType)...) } } if len(icons) == 0 { return 0 } - s.mu.Lock() - db := s.desktopDBForVolumeLocked(volID) - s.mu.Unlock() + db := s.desktopDBForVolume(volID) if db == nil { return 0 } @@ -145,7 +144,7 @@ func (s *AFPService) IngestAppleDoubleIcons(volID uint16, filePath string) int { // for creator on volID and feeds each app file through IngestAppleDoubleIcons. // This is the per-file fallback used by FPGetIcon on a cache miss — it never // walks the volume. -func (s *AFPService) ingestAppleDoubleIconsForCreator(volID uint16, db DesktopDB, creator [4]byte) { +func (s *Service) ingestAppleDoubleIconsForCreator(volID uint16, db DesktopDB, creator [4]byte) { entries := db.ListAPPL(creator) for _, e := range entries { path, errCode := s.resolveVolumePath(volID, e.dirID, e.pathname, 2 /* long names */) @@ -162,7 +161,7 @@ func (s *AFPService) ingestAppleDoubleIconsForCreator(volID uint16, db DesktopDB // It also probes each directory for an Icon\r file (using the canonical // host name from the metadata backend) and ingests custom folder icons. // Returns (filesScanned, iconsAdded). 
-func (s *AFPService) RebuildDesktopDBFromVolume(volID uint16) (filesScanned, iconsAdded int) { +func (s *Service) RebuildDesktopDBFromVolume(volID uint16) (filesScanned, iconsAdded int) { root, ok := s.volumeRootByID(volID) if !ok { return 0, 0 @@ -180,10 +179,13 @@ func (s *AFPService) RebuildDesktopDBFromVolume(volID uint16) (filesScanned, ico } // Probe for an Icon\r file inside this directory. iconPath := filepath.Join(path, iconName) - if _, iconErr := s.fs.Stat(iconPath); iconErr == nil { - netlog.Debug("[AFP][Desktop] rebuild scanning icon file=%q", iconPath) - filesScanned++ - iconsAdded += s.IngestAppleDoubleIcons(volID, iconPath) + backend := s.fsForPath(iconPath) + if backend != nil { + if _, iconErr := backend.Stat(iconPath); iconErr == nil { + netlog.Debug("[AFP][Desktop] rebuild scanning icon file=%q", iconPath) + filesScanned++ + iconsAdded += s.IngestAppleDoubleIcons(volID, iconPath) + } } return nil } @@ -201,7 +203,7 @@ func (s *AFPService) RebuildDesktopDBFromVolume(volID uint16) (filesScanned, ico // rebuildDesktopDBsIfConfigured triggers a rebuild for each volume that has // RebuildDesktopDB set in its VolumeConfig. Safe to call once at service start. -func (s *AFPService) rebuildDesktopDBsIfConfigured() { +func (s *Service) rebuildDesktopDBsIfConfigured() { for i := range s.Volumes { if s.Volumes[i].Config.RebuildDesktopDB { s.RebuildDesktopDBFromVolume(s.Volumes[i].ID) diff --git a/service/afp/desktop_state.go b/service/afp/desktop_state.go new file mode 100644 index 0000000..384646a --- /dev/null +++ b/service/afp/desktop_state.go @@ -0,0 +1,134 @@ +//go:build afp || all + +package afp + +import "sync" + +// desktopState owns the per-volume Desktop database handles and the +// DTRefNum → volume mapping handed out by FPOpenDT. The desktop subsystem +// only ever needs these three fields, so they sit behind their own +// RWMutex to keep AFP's auth / fork / volume call paths off the same +// contention domain. 
+type desktopState struct { + mu sync.RWMutex + dbs map[uint16]DesktopDB // volID → DesktopDB + refs map[uint16]uint16 // DTRefNum → volID + nextDTRef uint16 +} + +func newDesktopState() desktopState { + return desktopState{ + dbs: make(map[uint16]DesktopDB), + refs: make(map[uint16]uint16), + nextDTRef: 1, + } +} + +// volumeOf returns the volume id associated with a DTRefNum. The second +// result is false when the reference number was never issued or has been +// closed. +func (d *desktopState) volumeOf(dtRefNum uint16) (uint16, bool) { + d.mu.RLock() + defer d.mu.RUnlock() + volID, ok := d.refs[dtRefNum] + return volID, ok +} + +// lookup returns the DesktopDB for the given DTRefNum and the volume id it +// was opened against. The bool reports whether the DTRefNum is known; the +// returned DesktopDB may still be nil when the ref exists but the +// per-volume DB has not been opened (e.g. tests stub the ref directly). +// Callers that need both must use lookupDB. +func (d *desktopState) lookup(dtRefNum uint16) (DesktopDB, uint16, bool) { + d.mu.RLock() + defer d.mu.RUnlock() + volID, ok := d.refs[dtRefNum] + if !ok { + return nil, 0, false + } + return d.dbs[volID], volID, true +} + +// lookupDB is the strict variant of lookup: it returns ok=false unless both +// the DTRefNum is known and a DesktopDB has been opened for its volume. +func (d *desktopState) lookupDB(dtRefNum uint16) (DesktopDB, uint16, bool) { + d.mu.RLock() + defer d.mu.RUnlock() + volID, ok := d.refs[dtRefNum] + if !ok { + return nil, 0, false + } + db, ok := d.dbs[volID] + if !ok { + return nil, volID, false + } + return db, volID, true +} + +// openRef registers a new DTRefNum for volID and returns it. +// loader is invoked exactly once per volume the first time openRef is called +// for that volume. It must not call back into desktopState. 
+func (d *desktopState) openRef(volID uint16, loader func() DesktopDB) uint16 { + d.mu.Lock() + defer d.mu.Unlock() + if _, loaded := d.dbs[volID]; !loaded { + d.dbs[volID] = loader() + } + ref := d.nextDTRef + d.nextDTRef++ + d.refs[ref] = volID + return ref +} + +// closeRef invalidates a DTRefNum. It returns false when the reference was +// already closed or never existed. +func (d *desktopState) closeRef(dtRefNum uint16) bool { + d.mu.Lock() + defer d.mu.Unlock() + if _, ok := d.refs[dtRefNum]; !ok { + return false + } + delete(d.refs, dtRefNum) + return true +} + +// dbForVolume returns (and lazily creates via loader) the DesktopDB for +// volID. loader is invoked under the write lock and must not call back into +// desktopState. +func (d *desktopState) dbForVolume(volID uint16, loader func() DesktopDB) DesktopDB { + d.mu.Lock() + defer d.mu.Unlock() + if db, ok := d.dbs[volID]; ok { + return db + } + db := loader() + if db == nil { + return nil + } + d.dbs[volID] = db + return db +} + +// putDBForTest installs a DesktopDB directly. Tests use this to seed state +// without going through FPOpenDT. +func (d *desktopState) putDBForTest(volID uint16, db DesktopDB) { + d.mu.Lock() + defer d.mu.Unlock() + d.dbs[volID] = db +} + +// putRefForTest installs a DTRefNum → volID mapping directly. Tests use this +// to short-circuit FPOpenDT. +func (d *desktopState) putRefForTest(dtRefNum, volID uint16) { + d.mu.Lock() + defer d.mu.Unlock() + d.refs[dtRefNum] = volID +} + +// dbCount returns the number of opened DesktopDBs. Tests use this to assert +// no persistence side-effects. 
+func (d *desktopState) dbCount() int { + d.mu.RLock() + defer d.mu.RUnlock() + return len(d.dbs) +} diff --git a/service/afp/desktop_test.go b/service/afp/desktop_test.go index eb4ab5f..a0643a0 100644 --- a/service/afp/desktop_test.go +++ b/service/afp/desktop_test.go @@ -1,13 +1,32 @@ +//go:build afp || all + package afp import ( + "io/fs" + "path/filepath" "testing" ) +type readOnlyDesktopFSTestDouble struct { + LocalFileSystem +} + +func (f *readOnlyDesktopFSTestDouble) CreateDir(path string) error { + if filepath.Base(path) == ".AppleDesktop" { + return fs.ErrPermission + } + return f.LocalFileSystem.CreateDir(path) +} + +func (f *readOnlyDesktopFSTestDouble) IsReadOnly(_ string) (bool, error) { + return true, nil +} + func TestHandleGetIcon_MissingReturnsItemNotFound(t *testing.T) { tmp := t.TempDir() fsys := &LocalFileSystem{} - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol1", Path: tmp}}, fsys, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol1", Path: tmp}}, fsys, nil) openRes, errCode := s.handleOpenDT(&FPOpenDTReq{VolID: 1}) if errCode != NoErr { @@ -36,7 +55,7 @@ func TestHandleGetIcon_MissingReturnsItemNotFound(t *testing.T) { func TestHandleGetIcon_SizeZeroPresentProbe(t *testing.T) { tmp := t.TempDir() fsys := &LocalFileSystem{} - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol1", Path: tmp}}, fsys, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol1", Path: tmp}}, fsys, nil) openRes, errCode := s.handleOpenDT(&FPOpenDTReq{VolID: 1}) if errCode != NoErr { @@ -77,3 +96,17 @@ func TestHandleGetIcon_SizeZeroPresentProbe(t *testing.T) { t.Fatalf("handleGetIcon(size=0) returned %d bytes, want 0", len(res.Data)) } } + +func TestHandleOpenDT_ReadOnlyBackendIgnoresAppleDesktopCreateFailure(t *testing.T) { + tmp := t.TempDir() + fsys := &readOnlyDesktopFSTestDouble{} + s := NewService("TestServer", []VolumeConfig{{Name: "Vol1", Path: tmp}}, fsys, nil) + + openRes, errCode := 
s.handleOpenDT(&FPOpenDTReq{VolID: 1}) + if errCode != NoErr { + t.Fatalf("handleOpenDT errCode=%d, want %d", errCode, NoErr) + } + if openRes.DTRefNum == 0 { + t.Fatalf("handleOpenDT DTRefNum=%d, want non-zero", openRes.DTRefNum) + } +} diff --git a/service/afp/desktopdb.go b/service/afp/desktopdb.go index 06960f3..edc5e19 100644 --- a/service/afp/desktopdb.go +++ b/service/afp/desktopdb.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -5,7 +7,8 @@ import ( "fmt" "sync" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/cnid" ) const desktopDBFilename = ".desktop.db" @@ -55,7 +58,7 @@ func (SQLiteDesktopDBBackend) Open(volume Volume) DesktopDB { return db } -func resolveDesktopDBBackend(options AFPOptions) DesktopDBBackend { +func resolveDesktopDBBackend(options Options) DesktopDBBackend { if options.DesktopStoreBackend != nil { return options.DesktopStoreBackend } @@ -79,7 +82,7 @@ type sqliteDesktopDB struct { // NewSQLiteDesktopDB opens (or creates) the Desktop database for a volume root. 
func NewSQLiteDesktopDB(volumeRootPath string) (DesktopDB, error) { - db, err := openSQLiteDB(volumeRootPath) + db, err := cnid.OpenSQLiteDB(volumeRootPath) if err != nil { return nil, err } diff --git a/service/afp/directory.go b/service/afp/directory.go index 8d1a3ac..b06c6b5 100644 --- a/service/afp/directory.go +++ b/service/afp/directory.go @@ -1,15 +1,17 @@ +//go:build afp || all + package afp import ( + "github.com/pgodw/omnitalk/netlog" "bytes" "errors" "io/fs" - "log" "os" "path/filepath" ) -func (s *AFPService) handleOpenDir(req *FPOpenDirReq) (*FPOpenDirRes, int32) { +func (s *Service) handleOpenDir(req *FPOpenDirReq) (*FPOpenDirRes, int32) { parentPath, ok := s.getDIDPath(req.VolumeID, req.DirID) if !ok && req.DirID != 0 { return &FPOpenDirRes{}, ErrObjectNotFound @@ -32,82 +34,151 @@ func (s *AFPService) handleOpenDir(req *FPOpenDirReq) (*FPOpenDirRes, int32) { return res, NoErr } -func (s *AFPService) handleEnumerate(req *FPEnumerateReq) (*FPEnumerateRes, int32) { - log.Printf("[AFP] FPEnumerate: DirID=%d Path=%q StartIndex=%d ReqCount=%d", req.DirID, req.Path, req.StartIndex, req.ReqCount) +// enumerateReplyHeaderLen is the fixed header size of an FPEnumerate reply +// (FileBitmap+DirBitmap+ActCount); each entry is appended after it. 
+const enumerateReplyHeaderLen = 6 - if req.FileBitmap == 0 && req.DirBitmap == 0 { - return &FPEnumerateRes{}, ErrBitmapErr +func (s *Service) handleEnumerate(req *FPEnumerateReq) (*FPEnumerateRes, int32) { + netlog.Debug("[AFP] FPEnumerate: DirID=%d Path=%q StartIndex=%d ReqCount=%d", req.DirID, req.Path, req.StartIndex, req.ReqCount) + + if errCode := validateEnumerateRequest(req); errCode != NoErr { + return &FPEnumerateRes{}, errCode } - if req.FileBitmap&^enumerateFileBitmapMask != 0 || req.DirBitmap&^enumerateDirBitmapMask != 0 { - return &FPEnumerateRes{}, ErrBitmapErr + volFS := s.fsForVolume(req.VolumeID) + if volFS == nil { + return &FPEnumerateRes{}, ErrParamErr } - if _, ok := s.volumeRootByID(req.VolumeID); !ok { - return &FPEnumerateRes{}, ErrParamErr + targetPath, errCode := s.resolveEnumerateTarget(req, volFS) + if errCode != NoErr { + return &FPEnumerateRes{}, errCode + } + + entries, visibleCount, usedRangeFS, errCode := s.readEnumerateEntries(volFS, targetPath, req) + if errCode != NoErr { + return &FPEnumerateRes{}, errCode + } + + resData, actCount, totalVisible := s.packEnumerateEntries(req, targetPath, entries, visibleCount, usedRangeFS) + + res := &FPEnumerateRes{ + FileBitmap: req.FileBitmap, + DirBitmap: req.DirBitmap, + ActCount: actCount, + Data: resData, + } + + errCode = NoErr + if actCount == 0 && usedRangeFS && len(entries) == 0 { + // Range-capable backends signal end-of-directory by returning an empty + // page for the requested start index. + errCode = ErrObjectNotFound + } + if actCount == 0 && req.StartIndex > uint16(totalVisible) { + errCode = ErrObjectNotFound + } + + return res, errCode +} + +// validateEnumerateRequest checks the caller-supplied bitmaps, path type, and +// MaxReply budget. It does not touch the filesystem. 
+func validateEnumerateRequest(req *FPEnumerateReq) int32 { + if req.FileBitmap == 0 && req.DirBitmap == 0 { + return ErrBitmapErr + } + if req.FileBitmap&^enumerateFileBitmapMask != 0 || req.DirBitmap&^enumerateDirBitmapMask != 0 { + return ErrBitmapErr } if req.Path != "" && req.PathType != 1 && req.PathType != 2 { - return &FPEnumerateRes{}, ErrParamErr + return ErrParamErr } - const enumerateReplyHeaderLen = 6 if req.MaxReply < uint32(enumerateReplyHeaderLen+minEnumerateEntryLen(req.FileBitmap, req.DirBitmap)) { - return &FPEnumerateRes{}, ErrParamErr + return ErrParamErr } + return NoErr +} +// resolveEnumerateTarget walks DirID + Path to the directory whose contents +// will be enumerated. Returns the on-disk target path or an AFP error. +func (s *Service) resolveEnumerateTarget(req *FPEnumerateReq, volFS FileSystem) (string, int32) { + if _, ok := s.volumeRootByID(req.VolumeID); !ok { + return "", ErrParamErr + } parentPath, ok := s.getDIDPath(req.VolumeID, req.DirID) if !ok { - return &FPEnumerateRes{}, ErrDirNotFound + return "", ErrDirNotFound } - targetPath := parentPath if req.Path != "" { resolved, errCode := s.resolvePath(parentPath, req.Path, req.PathType) if errCode != NoErr { - if errCode == ErrAccessDenied { - return &FPEnumerateRes{}, ErrAccessDenied - } - return &FPEnumerateRes{}, ErrParamErr + return "", ErrParamErr } targetPath = resolved } - info, err := s.fs.Stat(targetPath) + info, err := volFS.Stat(targetPath) if err != nil { if errors.Is(err, fs.ErrPermission) { - return &FPEnumerateRes{}, ErrAccessDenied + return "", ErrAccessDenied } - return &FPEnumerateRes{}, ErrDirNotFound + return "", ErrDirNotFound } if !info.IsDir() { - return &FPEnumerateRes{}, ErrObjectTypeErr + return "", ErrObjectTypeErr } + return targetPath, NoErr +} - entries, err := s.fs.ReadDir(targetPath) +// readEnumerateEntries lists targetPath, preferring a range-aware backend when +// available so paging stays cheap on virtual volumes. 
visibleCount is the +// total entry count when the backend is range-aware (zero otherwise — the +// pager increments it as it walks). +func (s *Service) readEnumerateEntries(volFS FileSystem, targetPath string, req *FPEnumerateReq) ([]fs.DirEntry, int, bool, int32) { + if volFS.Capabilities().ReadDirRange { + entries, reqVisibleCount, err := volFS.ReadDirRange(targetPath, req.StartIndex, req.ReqCount) + if err == nil { + return entries, int(reqVisibleCount), true, NoErr + } + if !isNotSupported(err) { + return nil, 0, false, ErrDirNotFound + } + } + entries, err := volFS.ReadDir(targetPath) if err != nil { if errors.Is(err, fs.ErrPermission) { - return &FPEnumerateRes{}, ErrAccessDenied + return nil, 0, false, ErrAccessDenied } - return &FPEnumerateRes{}, ErrDirNotFound + return nil, 0, false, ErrDirNotFound } + return entries, 0, false, NoErr +} +// packEnumerateEntries pages, filters, and serialises directory entries into +// the FPEnumerate reply payload. Returns the wire bytes, the actual entry +// count emitted, and the total visible entry count (which the caller uses to +// detect "start index past end"). 
+func (s *Service) packEnumerateEntries(req *FPEnumerateReq, targetPath string, entries []fs.DirEntry, visibleCount int, usedRangeFS bool) ([]byte, uint16, int) { resData := new(bytes.Buffer) actCount := uint16(0) idx := uint16(1) - visibleCount := 0 for _, entry := range entries { if s.isMetadataArtifact(entry.Name(), entry.IsDir(), req.VolumeID) { continue } - if entry.IsDir() && req.DirBitmap == 0 { continue } if !entry.IsDir() && req.FileBitmap == 0 { continue } - visibleCount++ + if !usedRangeFS { + visibleCount++ + } - if idx < req.StartIndex { + if !usedRangeFS && idx < req.StartIndex { idx++ continue } @@ -115,62 +186,62 @@ func (s *AFPService) handleEnumerate(req *FPEnumerateReq) (*FPEnumerateRes, int3 break } - fullPath := filepath.Join(targetPath, entry.Name()) - info, err := s.fs.Stat(fullPath) - if err != nil { + entryBytes, ok := s.packEnumerateEntry(req.VolumeID, targetPath, entry, req.FileBitmap, req.DirBitmap) + if !ok { continue } - - entryBuf := new(bytes.Buffer) - entryBuf.WriteByte(0) - - isDir := entry.IsDir() - if isDir { - entryBuf.WriteByte(0x80) - } else { - entryBuf.WriteByte(0x00) - } - - bitmap := req.FileBitmap - if isDir { - bitmap = req.DirBitmap - } - - s.packFileInfo(entryBuf, req.VolumeID, bitmap, targetPath, entry.Name(), info, isDir) - - entryBytes := entryBuf.Bytes() - entryLength := len(entryBytes) - - if entryLength%2 != 0 { - entryBuf.WriteByte(0) - entryBytes = entryBuf.Bytes() - entryLength++ - } - - entryBytes[0] = byte(entryLength) - if uint32(enumerateReplyHeaderLen+resData.Len()+len(entryBytes)) > req.MaxReply { break } - resData.Write(entryBytes) actCount++ idx++ } + return resData.Bytes(), actCount, visibleCount +} - res := &FPEnumerateRes{ - FileBitmap: req.FileBitmap, - DirBitmap: req.DirBitmap, - ActCount: actCount, - Data: resData.Bytes(), +// packEnumerateEntry serialises a single FPEnumerate result entry. 
It +// returns the entry's wire bytes (with the leading length byte populated +// and any trailing pad applied) and ok=false if the entry should be +// skipped (Stat failure). The volFS lookup is repeated here rather than +// threaded in so the helper stays self-contained. +func (s *Service) packEnumerateEntry(volumeID uint16, parentPath string, entry fs.DirEntry, fileBitmap, dirBitmap uint16) ([]byte, bool) { + volFS := s.fsForVolume(volumeID) + if volFS == nil { + return nil, false + } + fullPath := filepath.Join(parentPath, entry.Name()) + info, err := volFS.Stat(fullPath) + if err != nil { + return nil, false } - errCode := NoErr - if actCount == 0 && req.StartIndex > uint16(visibleCount) { - errCode = ErrObjectNotFound + isDir := entry.IsDir() + if EnableAppleDoubleIconFallback && !isDir { + s.IngestAppleDoubleIcons(volumeID, fullPath) } - return res, errCode + entryBuf := new(bytes.Buffer) + entryBuf.WriteByte(0) + if isDir { + entryBuf.WriteByte(0x80) + } else { + entryBuf.WriteByte(0x00) + } + + bitmap := fileBitmap + if isDir { + bitmap = dirBitmap + } + s.packFileInfo(entryBuf, volumeID, bitmap, parentPath, entry.Name(), info, isDir) + + entryBytes := entryBuf.Bytes() + if len(entryBytes)%2 != 0 { + entryBuf.WriteByte(0) + entryBytes = entryBuf.Bytes() + } + entryBytes[0] = byte(len(entryBytes)) + return entryBytes, true } func minEnumerateEntryLen(fileBitmap, dirBitmap uint16) int { @@ -233,12 +304,12 @@ const ( DirBitmapProDOSInfo ) -func (s *AFPService) handleCloseDir(req *FPCloseDirReq) (*FPCloseDirRes, int32) { - log.Printf("[AFP] FPCloseDir called for DirID %d on Vol %d", req.DirID, req.VolumeID) +func (s *Service) handleCloseDir(req *FPCloseDirReq) (*FPCloseDirRes, int32) { + netlog.Debug("[AFP] FPCloseDir called for DirID %d on Vol %d", req.DirID, req.VolumeID) return &FPCloseDirRes{}, NoErr } -func (s *AFPService) handleSetDirParms(req *FPSetDirParmsReq) (*FPSetDirParmsRes, int32) { +func (s *Service) handleSetDirParms(req *FPSetDirParmsReq) 
(*FPSetDirParmsRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPSetDirParmsRes{}, ErrVolLocked } @@ -250,7 +321,7 @@ func (s *AFPService) handleSetDirParms(req *FPSetDirParmsReq) (*FPSetDirParmsRes return &FPSetDirParmsRes{}, NoErr } -func (s *AFPService) handleCreateDir(req *FPCreateDirReq) (*FPCreateDirRes, int32) { +func (s *Service) handleCreateDir(req *FPCreateDirReq) (*FPCreateDirRes, int32) { if s.fs == nil { return &FPCreateDirRes{}, ErrAccessDenied } @@ -261,7 +332,11 @@ func (s *AFPService) handleCreateDir(req *FPCreateDirReq) (*FPCreateDirRes, int3 if errCode != NoErr { return &FPCreateDirRes{}, errCode } - if err := s.fs.CreateDir(targetPath); err != nil { + backend := s.fsForPath(targetPath) + if backend == nil { + return &FPCreateDirRes{}, ErrAccessDenied + } + if err := backend.CreateDir(targetPath); err != nil { if os.IsExist(err) { return &FPCreateDirRes{}, ErrObjectExists } diff --git a/service/afp/directory_models.go b/service/afp/directory_models.go index b98cbd4..5061545 100644 --- a/service/afp/directory_models.go +++ b/service/afp/directory_models.go @@ -1,10 +1,13 @@ +//go:build afp || all + package afp import ( - "bytes" "encoding/binary" "fmt" "strings" + + "github.com/pgodw/omnitalk/pkg/binutil" ) func formatDirBitmap(bitmap uint16) string { @@ -110,10 +113,16 @@ func (res *FPOpenDirRes) String() string { return fmt.Sprintf("FPOpenDirRes{DirID: %d}", res.DirID) } +func (res *FPOpenDirRes) WireSize() int { return 4 } + +func (res *FPOpenDirRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU32(b, res.DirID) +} + func (res *FPOpenDirRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.DirID) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } type FPEnumerateReq struct { @@ -166,13 +175,36 @@ func (res *FPEnumerateRes) String() string { return fmt.Sprintf("FPEnumerateRes{FileBitmap: %s, DirBitmap: %s, ActCount: %d, DataLen: %d}", 
formatFileBitmap(res.FileBitmap), formatDirBitmap(res.DirBitmap), res.ActCount, len(res.Data)) } +func (res *FPEnumerateRes) WireSize() int { return 6 + len(res.Data) } + +func (res *FPEnumerateRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.FileBitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU16(b[off:], res.DirBitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU16(b[off:], res.ActCount) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPEnumerateRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.FileBitmap) - binary.Write(buf, binary.BigEndian, res.DirBitmap) - binary.Write(buf, binary.BigEndian, res.ActCount) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } // FPCreateDir - cmd(0), pad(1), VolumeID(2:4), DirID(4:8), PathType(8), PathLen(9), PathName(10:...) 
@@ -205,10 +237,16 @@ type FPCreateDirRes struct { DirID uint32 } +func (res *FPCreateDirRes) WireSize() int { return 4 } + +func (res *FPCreateDirRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU32(b, res.DirID) +} + func (res *FPCreateDirRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.DirID) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPCreateDirRes) String() string { return fmt.Sprintf("FPCreateDirRes{DirID: %d}", res.DirID) diff --git a/service/afp/directory_models_golden_test.go b/service/afp/directory_models_golden_test.go new file mode 100644 index 0000000..f24baf8 --- /dev/null +++ b/service/afp/directory_models_golden_test.go @@ -0,0 +1,43 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "testing" +) + +func TestFPOpenDirRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPOpenDirRes{DirID: 0xCAFEF00D} + got := res.Marshal() + want := goldenBytes(t, "fpopendirres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPCreateDirRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPCreateDirRes{DirID: 0xDEADBEEF} + got := res.Marshal() + want := goldenBytes(t, "fpcreatedirres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPEnumerateRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPEnumerateRes{ + FileBitmap: 0x07FB, + DirBitmap: 0x0DFF, + ActCount: 3, + Data: []byte("enumerate-payload"), + } + got := res.Marshal() + want := goldenBytes(t, "fpenumerateres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/dispatcher.go b/service/afp/dispatcher.go new file mode 100644 index 0000000..f12b39f --- /dev/null +++ b/service/afp/dispatcher.go 
@@ -0,0 +1,637 @@ +//go:build afp || all + +package afp + +import ( + "runtime/debug" + + "github.com/pgodw/omnitalk/netlog" +) + +// Request is the decoded form of an inbound AFP command. +type Request interface { + Unmarshal(data []byte) error + String() string +} + +// Response is a Service-produced AFP reply ready for wire emission. +type Response interface { + Marshal() []byte + String() string +} + +// HandleCommand decodes one AFP command, dispatches it through the registry, +// and returns the marshalled reply (or an AFP error code). Panics in handlers +// are recovered and surfaced as ErrParamErr so a single bad request cannot +// take down the session. +func (s *Service) HandleCommand(data []byte) (resBytes []byte, errCode int32) { + defer func() { + if r := recover(); r != nil { + netlog.Warn("[AFP] PANIC in cmd=%d: %v\n%s", data[0], r, debug.Stack()) + resBytes = nil + errCode = ErrParamErr + } + }() + if len(data) == 0 { + return nil, ErrParamErr + } + + cmd := data[0] + afpCommandsTotal.Inc() + + spec, ok := commandRegistry[cmd] + if !ok { + netlog.Debug("[AFP] unknown command %d", cmd) + return nil, ErrCallNotSupported + } + + req := spec.newReq() + cmdData := data + if spec.stripCmdByte { + cmdData = data[1:] + } + + if err := req.Unmarshal(cmdData); err != nil { + netlog.Debug("[AFP] Error unmarshaling cmd %d: %v", cmd, err) + return nil, ErrParamErr + } + + s.logPacket("[AFP] → %s", req.String()) + s.logResolvedPaths(req) + + res, errCode := spec.handle(s, req) + + if res != nil { + s.logPacket("[AFP] ← %s (err=%d)", res.String(), errCode) + resBytes = res.Marshal() + } else if errCode != NoErr { + s.logPacket("[AFP] ← cmd=%d err=%d", cmd, errCode) + } + + return resBytes, errCode +} + +// commandSpec describes how to dispatch one AFP command code. 
+// +// Each command names a request constructor (so we can decode into the right +// struct), a handler bound to the running Service, and an optional flag that +// strips the leading command byte before Unmarshal — FPLogin is the lone +// command whose request decoder expects the command byte already removed. +type commandSpec struct { + name string + newReq func() Request + handle func(s *Service, req Request) (Response, int32) + stripCmdByte bool +} + +// commandRegistry maps AFP command codes to their dispatch specs. +// +// Adding a new command: declare the spec here. The dispatcher in +// HandleCommand handles unmarshal, logging, response packing, and panic +// recovery uniformly. +// +// Each handle closure mirrors the original switch's nil-response treatment: +// if the concrete handler returns a nil pointer, surface it as a nil Response +// so the dispatcher skips Marshal. +var commandRegistry = map[uint8]commandSpec{ + FPGetSrvrInfo: { + name: "FPGetSrvrInfo", + newReq: func() Request { return &FPGetSrvrInfoReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetSrvrInfo(req.(*FPGetSrvrInfoReq)) + if err != nil { + return nil, ErrMiscErr + } + return res, NoErr + }, + }, + FPGetSrvrParms: { + name: "FPGetSrvrParms", + newReq: func() Request { return &FPGetSrvrParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetSrvrParms(req.(*FPGetSrvrParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPLogin: { + name: "FPLogin", + newReq: func() Request { return &FPLoginReq{} }, + stripCmdByte: true, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleLogin(req.(*FPLoginReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPLogout: { + name: "FPLogout", + newReq: func() Request { return &FPLogoutReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := 
s.handleLogout(req.(*FPLogoutReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPOpenVol: { + name: "FPOpenVol", + newReq: func() Request { return &FPOpenVolReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleOpenVol(req.(*FPOpenVolReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetVolParms: { + name: "FPGetVolParms", + newReq: func() Request { return &FPGetVolParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetVolParms(req.(*FPGetVolParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPOpenDir: { + name: "FPOpenDir", + newReq: func() Request { return &FPOpenDirReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleOpenDir(req.(*FPOpenDirReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCloseVol: { + name: "FPCloseVol", + newReq: func() Request { return &FPCloseVolReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCloseVol(req.(*FPCloseVolReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCloseDir: { + name: "FPCloseDir", + newReq: func() Request { return &FPCloseDirReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCloseDir(req.(*FPCloseDirReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCloseFork: { + name: "FPCloseFork", + newReq: func() Request { return &FPCloseForkReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCloseFork(req.(*FPCloseForkReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPFlush: { + name: "FPFlush", + newReq: func() Request { return &FPFlushReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + return s.handleFlush(req.(*FPFlushReq)) + }, + }, + FPFlushFork: { + name: "FPFlushFork", + newReq: 
func() Request { return &FPFlushForkReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + return s.handleFlushFork(req.(*FPFlushForkReq)) + }, + }, + FPEnumerate: { + name: "FPEnumerate", + newReq: func() Request { return &FPEnumerateReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleEnumerate(req.(*FPEnumerateReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetFileDirParms: { + name: "FPGetFileDirParms", + newReq: func() Request { return &FPGetFileDirParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetFileDirParms(req.(*FPGetFileDirParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPOpenFork: { + name: "FPOpenFork", + newReq: func() Request { return &FPOpenForkReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleOpenFork(req.(*FPOpenForkReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPRead: { + name: "FPRead", + newReq: func() Request { return &FPReadReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleRead(req.(*FPReadReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPWrite: { + name: "FPWrite", + newReq: func() Request { return &FPWriteReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleWrite(req.(*FPWriteReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCreateFile: { + name: "FPCreateFile", + newReq: func() Request { return &FPCreateFileReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCreateFile(req.(*FPCreateFileReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCreateDir: { + name: "FPCreateDir", + newReq: func() Request { return &FPCreateDirReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := 
s.handleCreateDir(req.(*FPCreateDirReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPDelete: { + name: "FPDelete", + newReq: func() Request { return &FPDeleteReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleDelete(req.(*FPDeleteReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPRename: { + name: "FPRename", + newReq: func() Request { return &FPRenameReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleRename(req.(*FPRenameReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPByteRangeLock: { + name: "FPByteRangeLock", + newReq: func() Request { return &FPByteRangeLockReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleByteRangeLock(req.(*FPByteRangeLockReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCopyFile: { + name: "FPCopyFile", + newReq: func() Request { return &FPCopyFileReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCopyFile(req.(*FPCopyFileReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetDirParms: { + name: "FPGetDirParms", + newReq: func() Request { return &FPGetDirParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetDirParms(req.(*FPGetDirParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetFileParms: { + name: "FPGetFileParms", + newReq: func() Request { return &FPGetFileParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetFileParms(req.(*FPGetFileParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetForkParms: { + name: "FPGetForkParms", + newReq: func() Request { return &FPGetForkParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := 
s.handleGetForkParms(req.(*FPGetForkParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPLoginCont: { + name: "FPLoginCont", + newReq: func() Request { return &FPLoginContReq{} }, + // TODO: Implement second-phase UAM login (AFP 2.x §5.1.19). + handle: func(s *Service, req Request) (Response, int32) { + return nil, ErrCallNotSupported + }, + }, + FPMapID: { + name: "FPMapID", + newReq: func() Request { return &FPMapIDReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleMapID(req.(*FPMapIDReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPMapName: { + name: "FPMapName", + newReq: func() Request { return &FPMapNameReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleMapName(req.(*FPMapNameReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPMoveAndRename: { + name: "FPMoveAndRename", + newReq: func() Request { return &FPMoveAndRenameReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleMoveAndRename(req.(*FPMoveAndRenameReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPSetDirParms: { + name: "FPSetDirParms", + newReq: func() Request { return &FPSetDirParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleSetDirParms(req.(*FPSetDirParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPSetFileParms: { + name: "FPSetFileParms", + newReq: func() Request { return &FPSetFileParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleSetFileParms(req.(*FPSetFileParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPSetForkParms: { + name: "FPSetForkParms", + newReq: func() Request { return &FPSetForkParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := 
s.handleSetForkParms(req.(*FPSetForkParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPSetVolParms: { + name: "FPSetVolParms", + newReq: func() Request { return &FPSetVolParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleSetVolParms(req.(*FPSetVolParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPSetFileDirParms: { + name: "FPSetFileDirParms", + newReq: func() Request { return &FPSetFileDirParmsReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleSetFileDirParms(req.(*FPSetFileDirParmsReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPExchangeFiles: { + name: "FPExchangeFiles", + newReq: func() Request { return &FPExchangeFilesReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleExchangeFiles(req.(*FPExchangeFilesReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetSrvrMsg: { + name: "FPGetSrvrMsg", + newReq: func() Request { return &FPGetSrvrMsgReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + r := req.(*FPGetSrvrMsgReq) + return &FPGetSrvrMsgRes{MessageType: r.MessageType}, NoErr + }, + }, + FPChangePassword: { + name: "FPChangePassword", + newReq: func() Request { return &FPUnsupportedReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + return nil, ErrCallNotSupported + }, + }, + FPGetUserInfo: { + name: "FPGetUserInfo", + newReq: func() Request { return &FPUnsupportedReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + return nil, ErrCallNotSupported + }, + }, + FPCatSearch: { + name: "FPCatSearch", + newReq: func() Request { return &FPCatSearchReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCatSearch(req.(*FPCatSearchReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPOpenDT: { + name: "FPOpenDT", + 
newReq: func() Request { return &FPOpenDTReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleOpenDT(req.(*FPOpenDTReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPCloseDT: { + name: "FPCloseDT", + newReq: func() Request { return &FPCloseDTReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleCloseDT(req.(*FPCloseDTReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetIcon: { + name: "FPGetIcon", + newReq: func() Request { return &FPGetIconReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetIcon(req.(*FPGetIconReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetIconInfo: { + name: "FPGetIconInfo", + newReq: func() Request { return &FPGetIconInfoReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetIconInfo(req.(*FPGetIconInfoReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPAddIcon: { + name: "FPAddIcon", + newReq: func() Request { return &FPAddIconReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleAddIcon(req.(*FPAddIconReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPAddAPPL: { + name: "FPAddAPPL", + newReq: func() Request { return &FPAddAPPLReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleAddAPPL(req.(*FPAddAPPLReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPRemoveAPPL: { + name: "FPRemoveAPPL", + newReq: func() Request { return &FPRemoveAPPLReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleRemoveAPPL(req.(*FPRemoveAPPLReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetAPPL: { + name: "FPGetAPPL", + newReq: func() Request { return &FPGetAPPLReq{} }, + handle: func(s *Service, req Request) 
(Response, int32) { + res, err := s.handleGetAPPL(req.(*FPGetAPPLReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPAddComment: { + name: "FPAddComment", + newReq: func() Request { return &FPAddCommentReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleAddComment(req.(*FPAddCommentReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPRemoveComment: { + name: "FPRemoveComment", + newReq: func() Request { return &FPRemoveCommentReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleRemoveComment(req.(*FPRemoveCommentReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, + FPGetComment: { + name: "FPGetComment", + newReq: func() Request { return &FPGetCommentReq{} }, + handle: func(s *Service, req Request) (Response, int32) { + res, err := s.handleGetComment(req.(*FPGetCommentReq)) + if res == nil { + return nil, err + } + return res, err + }, + }, +} diff --git a/service/afp/enumerate_encoding_test.go b/service/afp/enumerate_encoding_test.go index f1b007b..32fd59e 100644 --- a/service/afp/enumerate_encoding_test.go +++ b/service/afp/enumerate_encoding_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -8,10 +10,182 @@ import ( "os" "path/filepath" "testing" + "time" - "github.com/pgodw/omnitalk/go/appletalk" + "github.com/pgodw/omnitalk/pkg/encoding" ) +type enumStubInfo struct { + name string + mode fs.FileMode + isDir bool +} + +func (i *enumStubInfo) Name() string { return i.name } +func (i *enumStubInfo) Size() int64 { return 0 } +func (i *enumStubInfo) Mode() fs.FileMode { return i.mode } +func (i *enumStubInfo) ModTime() time.Time { return time.Time{} } +func (i *enumStubInfo) IsDir() bool { return i.isDir } +func (i *enumStubInfo) Sys() any { return nil } + +type enumStubDirEntry struct{ info fs.FileInfo } + +func (d enumStubDirEntry) Name() string { return d.info.Name() } +func (d 
enumStubDirEntry) IsDir() bool { return d.info.IsDir() } +func (d enumStubDirEntry) Type() fs.FileMode { return d.info.Mode().Type() } +func (d enumStubDirEntry) Info() (fs.FileInfo, error) { return d.info, nil } + +type childCountSpyFS struct { + root string + childCountCalls int + readDirCalls []string +} + +type rangeSpyFS struct { + root string + readDirCalls []string + rangeCalls []string + lastStartIndex uint16 + lastReqCount uint16 +} + +type rangeEmptySpyFS struct { + root string +} + +func (s *childCountSpyFS) ReadDir(path string) ([]fs.DirEntry, error) { + s.readDirCalls = append(s.readDirCalls, filepath.Clean(path)) + if filepath.Clean(path) == filepath.Clean(s.root) { + return []fs.DirEntry{ + enumStubDirEntry{info: &enumStubInfo{name: "Apps", mode: fs.ModeDir | 0o555, isDir: true}}, + enumStubDirEntry{info: &enumStubInfo{name: "Games", mode: fs.ModeDir | 0o555, isDir: true}}, + }, nil + } + return nil, fs.ErrPermission +} + +func (s *childCountSpyFS) Stat(path string) (fs.FileInfo, error) { + clean := filepath.Clean(path) + if clean == filepath.Clean(s.root) || clean == filepath.Join(s.root, "Apps") || clean == filepath.Join(s.root, "Games") { + return &enumStubInfo{name: filepath.Base(clean), mode: fs.ModeDir | 0o555, isDir: true}, nil + } + return nil, fs.ErrNotExist +} + +func (s *childCountSpyFS) DiskUsage(path string) (uint64, uint64, error) { return 0, 0, nil } +func (s *childCountSpyFS) CreateDir(path string) error { return fs.ErrPermission } +func (s *childCountSpyFS) CreateFile(path string) (File, error) { return nil, fs.ErrPermission } +func (s *childCountSpyFS) OpenFile(path string, flag int) (File, error) { return nil, fs.ErrPermission } +func (s *childCountSpyFS) Remove(path string) error { return fs.ErrPermission } +func (s *childCountSpyFS) Rename(oldpath, newpath string) error { return fs.ErrPermission } +func (s *childCountSpyFS) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{ChildCount: true} +} +func (s 
*childCountSpyFS) CatSearch(volumeRoot string, query string, reqMatches int32, cursor [16]byte) ([]string, [16]byte, int32) { + return nil, cursor, ErrCallNotSupported +} +func (s *childCountSpyFS) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + return nil, 0, newNotSupported("ReadDirRange") +} +func (s *childCountSpyFS) DirAttributes(path string) (uint16, error) { return 0, nil } +func (s *childCountSpyFS) IsReadOnly(path string) (bool, error) { return false, nil } +func (s *childCountSpyFS) SupportsCatSearch(path string) (bool, error) { return false, nil } + +func (s *rangeSpyFS) ReadDir(path string) ([]fs.DirEntry, error) { + s.readDirCalls = append(s.readDirCalls, filepath.Clean(path)) + return nil, fs.ErrPermission +} + +func (s *rangeSpyFS) Stat(path string) (fs.FileInfo, error) { + clean := filepath.Clean(path) + if clean == filepath.Clean(s.root) { + return &enumStubInfo{name: filepath.Base(clean), mode: fs.ModeDir | 0o555, isDir: true}, nil + } + if clean == filepath.Join(s.root, "Gamma") || clean == filepath.Join(s.root, "Delta") { + return &enumStubInfo{name: filepath.Base(clean), mode: fs.ModeDir | 0o555, isDir: true}, nil + } + return nil, fs.ErrNotExist +} + +func (s *rangeSpyFS) DiskUsage(path string) (uint64, uint64, error) { return 0, 0, nil } +func (s *rangeSpyFS) CreateDir(path string) error { return fs.ErrPermission } +func (s *rangeSpyFS) CreateFile(path string) (File, error) { return nil, fs.ErrPermission } +func (s *rangeSpyFS) OpenFile(path string, flag int) (File, error) { return nil, fs.ErrPermission } +func (s *rangeSpyFS) Remove(path string) error { return fs.ErrPermission } +func (s *rangeSpyFS) Rename(oldpath, newpath string) error { return fs.ErrPermission } +func (s *rangeSpyFS) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{ReadDirRange: true} +} +func (s *rangeSpyFS) CatSearch(volumeRoot string, query string, reqMatches int32, cursor [16]byte) ([]string, 
[16]byte, int32) { + return nil, cursor, ErrCallNotSupported +} +func (s *rangeSpyFS) ChildCount(path string) (uint16, error) { return 0, newNotSupported("ChildCount") } +func (s *rangeSpyFS) DirAttributes(path string) (uint16, error) { + return 0, nil +} +func (s *rangeSpyFS) IsReadOnly(path string) (bool, error) { return false, nil } +func (s *rangeSpyFS) SupportsCatSearch(path string) (bool, error) { return false, nil } + +func (s *rangeSpyFS) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + s.rangeCalls = append(s.rangeCalls, filepath.Clean(path)) + s.lastStartIndex = startIndex + s.lastReqCount = reqCount + return []fs.DirEntry{ + enumStubDirEntry{info: &enumStubInfo{name: "Gamma", mode: fs.ModeDir | 0o555, isDir: true}}, + enumStubDirEntry{info: &enumStubInfo{name: "Delta", mode: fs.ModeDir | 0o555, isDir: true}}, + }, 7, nil +} + +func (s *rangeEmptySpyFS) ReadDir(path string) ([]fs.DirEntry, error) { + return nil, fs.ErrPermission +} + +func (s *rangeEmptySpyFS) Stat(path string) (fs.FileInfo, error) { + if filepath.Clean(path) == filepath.Clean(s.root) { + return &enumStubInfo{name: filepath.Base(path), mode: fs.ModeDir | 0o555, isDir: true}, nil + } + return nil, fs.ErrNotExist +} + +func (s *rangeEmptySpyFS) DiskUsage(path string) (uint64, uint64, error) { return 0, 0, nil } +func (s *rangeEmptySpyFS) CreateDir(path string) error { return fs.ErrPermission } +func (s *rangeEmptySpyFS) CreateFile(path string) (File, error) { return nil, fs.ErrPermission } +func (s *rangeEmptySpyFS) OpenFile(path string, flag int) (File, error) { return nil, fs.ErrPermission } +func (s *rangeEmptySpyFS) Remove(path string) error { return fs.ErrPermission } +func (s *rangeEmptySpyFS) Rename(oldpath, newpath string) error { return fs.ErrPermission } +func (s *rangeEmptySpyFS) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{ReadDirRange: true} +} +func (s *rangeEmptySpyFS) CatSearch(volumeRoot string, 
query string, reqMatches int32, cursor [16]byte) ([]string, [16]byte, int32) { + return nil, cursor, ErrCallNotSupported +} +func (s *rangeEmptySpyFS) ChildCount(path string) (uint16, error) { + return 0, newNotSupported("ChildCount") +} +func (s *rangeEmptySpyFS) DirAttributes(path string) (uint16, error) { + return 0, nil +} +func (s *rangeEmptySpyFS) IsReadOnly(path string) (bool, error) { return false, nil } +func (s *rangeEmptySpyFS) SupportsCatSearch(path string) (bool, error) { return false, nil } + +func (s *rangeEmptySpyFS) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + // Deliberately returns an empty page with a bogus non-zero visibleCount to + // emulate a backend that does not provide a reliable total count. + return nil, 1000, nil +} + +func (s *childCountSpyFS) ChildCount(path string) (uint16, error) { + s.childCountCalls++ + switch filepath.Clean(path) { + case filepath.Join(s.root, "Apps"): + return 11, nil + case filepath.Join(s.root, "Games"): + return 22, nil + default: + return 0, newNotSupported("ChildCount") + } +} + type denyReadDirFS struct { *LocalFileSystem denyPath string @@ -52,7 +226,7 @@ func firstEnumerateLongName(entryData []byte) ([]byte, error) { func TestHandleEnumerate_LongNameEncodedAsMacRoman(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) hostName := "Netscape Navigator™ 2.02" if err := os.WriteFile(filepath.Join(root, hostName), []byte("x"), 0644); err != nil { @@ -83,7 +257,7 @@ func TestHandleEnumerate_LongNameEncodedAsMacRoman(t *testing.T) { if err != nil { t.Fatalf("parse enumerate long name: %v", err) } - wantName := appletalk.UTF8ToMacRoman(hostName) + wantName := encoding.UTF8ToMacRoman(hostName) if !bytes.Equal(gotName, wantName) { t.Fatalf("enumerate name bytes = %x, want %x", 
gotName, wantName) } @@ -94,7 +268,7 @@ func TestHandleEnumerate_LongNameEncodedAsMacRoman(t *testing.T) { func TestHandleEnumerate_PathDecodesMacRoman(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) dirName := "Folder™" fileName := "Inside™.txt" @@ -130,7 +304,7 @@ func TestHandleEnumerate_PathDecodesMacRoman(t *testing.T) { if err != nil { t.Fatalf("parse enumerate long name: %v", err) } - wantName := appletalk.UTF8ToMacRoman(fileName) + wantName := encoding.UTF8ToMacRoman(fileName) if !bytes.Equal(gotName, wantName) { t.Fatalf("enumerate name bytes = %x, want %x", gotName, wantName) } @@ -141,7 +315,7 @@ func TestHandleEnumerate_PathDecodesMacRoman(t *testing.T) { // enumerable entries. func TestHandleEnumerate_SidecarsExcludedFromCount(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) // Create 2 real files and a sidecar for each — 4 filesystem entries total. for _, name := range []string{"Alpha", "Beta"} { @@ -179,7 +353,7 @@ func TestHandleEnumerate_SidecarsExcludedFromCount(t *testing.T) { // entries, not the raw filesystem entry count. func TestHandleEnumerate_EndOfDirUsesVisibleCount(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) // 2 real files + 2 sidecars = 4 raw entries, but only 2 visible. 
for _, name := range []string{"Alpha", "Beta"} { @@ -220,13 +394,106 @@ func TestHandleEnumerate_EndOfDirUsesVisibleCount(t *testing.T) { } } +func TestHandleEnumerate_UsesChildCountWithoutRecursiveReadDir(t *testing.T) { + root := t.TempDir() + spy := &childCountSpyFS{root: root} + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, spy, nil) + + req := &FPEnumerateReq{ + VolumeID: 1, + DirID: CNIDRoot, + FileBitmap: 0, + DirBitmap: DirBitmapLongName | DirBitmapOffspringCount, + ReqCount: 64, + StartIndex: 1, + MaxReply: 1152, + PathType: 2, + Path: "", + } + + res, errCode := s.handleEnumerate(req) + if errCode != NoErr { + t.Fatalf("handleEnumerate err = %d, want %d", errCode, NoErr) + } + if res.ActCount != 2 { + t.Fatalf("ActCount = %d, want 2", res.ActCount) + } + if spy.childCountCalls != 2 { + t.Fatalf("ChildCount calls = %d, want 2", spy.childCountCalls) + } + if len(spy.readDirCalls) != 1 || filepath.Clean(spy.readDirCalls[0]) != filepath.Clean(root) { + t.Fatalf("ReadDir calls = %v, want only root enumerate", spy.readDirCalls) + } +} + +func TestHandleEnumerate_UsesReadDirRangeWhenAvailable(t *testing.T) { + root := t.TempDir() + spy := &rangeSpyFS{root: root} + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, spy, nil) + + req := &FPEnumerateReq{ + VolumeID: 1, + DirID: CNIDRoot, + FileBitmap: 0, + DirBitmap: DirBitmapLongName, + ReqCount: 2, + StartIndex: 3, + MaxReply: 1152, + PathType: 2, + Path: "", + } + + res, errCode := s.handleEnumerate(req) + if errCode != NoErr { + t.Fatalf("handleEnumerate err = %d, want %d", errCode, NoErr) + } + if res.ActCount != 2 { + t.Fatalf("ActCount = %d, want 2", res.ActCount) + } + if len(spy.rangeCalls) != 1 || filepath.Clean(spy.rangeCalls[0]) != filepath.Clean(root) { + t.Fatalf("ReadDirRange calls = %v, want only root", spy.rangeCalls) + } + if spy.lastStartIndex != 3 || spy.lastReqCount != 2 { + t.Fatalf("ReadDirRange args = (%d, %d), want (3, 2)", 
spy.lastStartIndex, spy.lastReqCount) + } + if len(spy.readDirCalls) != 0 { + t.Fatalf("ReadDir calls = %v, want none", spy.readDirCalls) + } + if res.Data == nil || len(res.Data) == 0 { + t.Fatal("expected enumerate data from range provider") + } +} + +func TestHandleEnumerate_RangeEmptyPageReturnsObjectNotFound(t *testing.T) { + root := t.TempDir() + spy := &rangeEmptySpyFS{root: root} + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, spy, nil) + + req := &FPEnumerateReq{ + VolumeID: 1, + DirID: CNIDRoot, + FileBitmap: 0, + DirBitmap: DirBitmapLongName, + ReqCount: 64, + StartIndex: 11, + MaxReply: 1152, + PathType: 2, + Path: "", + } + + _, errCode := s.handleEnumerate(req) + if errCode != ErrObjectNotFound { + t.Fatalf("errCode = %d, want ErrObjectNotFound (%d)", errCode, ErrObjectNotFound) + } +} + // TestHandleEnumerate_LegacyAppleDoubleDirExcluded verifies that legacy // metadata directories are never treated as user-visible entries. func TestHandleEnumerate_LegacyAppleDoubleDirExcluded(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options) for _, name := range []string{"Alpha", "Beta"} { if err := os.WriteFile(filepath.Join(root, name), []byte("x"), 0644); err != nil { @@ -274,9 +541,9 @@ func TestHandleEnumerate_LegacyAppleDoubleDirExcluded(t *testing.T) { // .AppleDouble metadata directories are hidden regardless of on-disk case. 
func TestHandleEnumerate_LegacyAppleDoubleDirCaseInsensitive(t *testing.T) { root := t.TempDir() - options := DefaultAFPOptions() + options := DefaultOptions() options.AppleDoubleMode = AppleDoubleModeLegacy - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options) if err := os.WriteFile(filepath.Join(root, "Visible"), []byte("x"), 0644); err != nil { t.Fatalf("seed visible file: %v", err) @@ -313,9 +580,38 @@ func TestHandleEnumerate_LegacyAppleDoubleDirCaseInsensitive(t *testing.T) { func TestHandleEnumerate_ErrorsForBitmapAndReplyValidation(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) _, errCode := s.handleEnumerate(&FPEnumerateReq{ + VolumeID: 999, + DirID: CNIDRoot, + FileBitmap: FileBitmapLongName, + DirBitmap: DirBitmapLongName, + ReqCount: 1, + StartIndex: 1, + MaxReply: 4096, + PathType: 2, + }) + if errCode != ErrParamErr { + t.Fatalf("unknown VolumeID errCode=%d, want ErrParamErr (%d)", errCode, ErrParamErr) + } + + _, errCode = s.handleEnumerate(&FPEnumerateReq{ + VolumeID: 1, + DirID: CNIDRoot, + FileBitmap: FileBitmapLongName, + DirBitmap: DirBitmapLongName, + ReqCount: 1, + StartIndex: 1, + MaxReply: 4096, + PathType: 99, + Path: "anything", + }) + if errCode != ErrParamErr { + t.Fatalf("bad PathType errCode=%d, want ErrParamErr (%d)", errCode, ErrParamErr) + } + + _, errCode = s.handleEnumerate(&FPEnumerateReq{ VolumeID: 1, DirID: CNIDRoot, FileBitmap: 0, @@ -356,11 +652,26 @@ func TestHandleEnumerate_ErrorsForBitmapAndReplyValidation(t *testing.T) { if errCode != ErrParamErr { t.Fatalf("small MaxReply errCode=%d, want ErrParamErr (%d)", errCode, ErrParamErr) } + + _, errCode = 
s.handleEnumerate(&FPEnumerateReq{ + VolumeID: 1, + DirID: CNIDRoot, + FileBitmap: FileBitmapLongName, + DirBitmap: DirBitmapLongName, + ReqCount: 1, + StartIndex: 1, + MaxReply: 4096, + PathType: 2, + Path: string([]byte{'b', 'a', 'd', 0x00, 0x00, 0x00, 0x00, 'n', 'a', 'm', 'e'}), + }) + if errCode != ErrParamErr { + t.Fatalf("bad pathname errCode=%d, want ErrParamErr (%d)", errCode, ErrParamErr) + } } func TestHandleEnumerate_ErrorsForDirectoryTarget(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) if err := os.WriteFile(filepath.Join(root, "afile"), []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) @@ -418,7 +729,7 @@ func TestHandleEnumerate_AccessDeniedFromReadDir(t *testing.T) { t.Fatalf("mkdir deny dir: %v", err) } - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &denyReadDirFS{LocalFileSystem: &LocalFileSystem{}, denyPath: denyDir}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &denyReadDirFS{LocalFileSystem: &LocalFileSystem{}, denyPath: denyDir}, nil) _, errCode := s.handleEnumerate(&FPEnumerateReq{ VolumeID: 1, @@ -438,7 +749,7 @@ func TestHandleEnumerate_AccessDeniedFromReadDir(t *testing.T) { func TestHandleEnumerate_AcceptsFinderFullBitmaps(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) if err := os.WriteFile(filepath.Join(root, "Alpha"), []byte("x"), 0644); err != nil { t.Fatalf("seed file: %v", err) @@ -468,7 +779,7 @@ func TestHandleEnumerate_AcceptsFinderFullBitmaps(t *testing.T) { func TestHandleEnumerate_RespectsMaxReplyIncludingHeader(t *testing.T) { root := t.TempDir() - s := 
NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) for i := 0; i < 40; i++ { name := fmt.Sprintf("Item-%02d", i) diff --git a/service/afp/errors.go b/service/afp/errors.go new file mode 100644 index 0000000..efa2451 --- /dev/null +++ b/service/afp/errors.go @@ -0,0 +1,34 @@ +//go:build afp || all + +package afp + +import ( + "errors" + "fmt" +) + +// ErrCopySourceReadEOF indicates a source read failure during copy that should +// map to AFP ErrEOFErr. +var ErrCopySourceReadEOF = errors.New("copy source read eof") + +// NotSupportedError indicates a filesystem operation exists but is not +// supported by a specific backend. +type NotSupportedError struct { + Operation string +} + +func (e *NotSupportedError) Error() string { + if e == nil || e.Operation == "" { + return "not supported" + } + return fmt.Sprintf("not supported: %s", e.Operation) +} + +func newNotSupported(op string) error { + return &NotSupportedError{Operation: op} +} + +func isNotSupported(err error) bool { + var ns *NotSupportedError + return errors.As(err, &ns) +} diff --git a/service/afp/extension_map.go b/service/afp/extension_map.go index a9b57d8..de40d7e 100644 --- a/service/afp/extension_map.go +++ b/service/afp/extension_map.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( diff --git a/service/afp/extension_map_test.go b/service/afp/extension_map_test.go index 8438202..24e7315 100644 --- a/service/afp/extension_map_test.go +++ b/service/afp/extension_map_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -105,9 +107,9 @@ func TestHandleGetFileParms_UsesExtensionMapWithoutPersisting(t *testing.T) { t.Fatalf("NewExtensionMap: %v", err) } - options := DefaultAFPOptions() + options := DefaultOptions() options.ExtensionMap = extMap - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, 
&LocalFileSystem{}, nil, options) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil, options) if tc.seedFinderInfo != nil { if err := s.metaFor(1).WriteFinderInfo(filePath, *tc.seedFinderInfo); err != nil { @@ -133,8 +135,8 @@ func TestHandleGetFileParms_UsesExtensionMapWithoutPersisting(t *testing.T) { } if tc.checkNoPersistence { - if len(s.desktopDBs) != 0 { - t.Fatalf("desktopDBs len = %d, want 0", len(s.desktopDBs)) + if n := s.desktop.dbCount(); n != 0 { + t.Fatalf("desktopDBs len = %d, want 0", n) } if _, err := os.Stat(filepath.Join(root, "._ReadMe.txt")); !os.IsNotExist(err) { t.Fatalf("AppleDouble sidecar unexpectedly created: err=%v", err) diff --git a/service/afp/file.go b/service/afp/file.go index 3a1b25b..7ba1ec1 100644 --- a/service/afp/file.go +++ b/service/afp/file.go @@ -1,13 +1,16 @@ +//go:build afp || all + package afp import ( + "github.com/pgodw/omnitalk/netlog" + "errors" "io" - "log" "os" "path/filepath" ) -func (s *AFPService) handleSetFileParms(req *FPSetFileParmsReq) (*FPSetFileParmsRes, int32) { +func (s *Service) handleSetFileParms(req *FPSetFileParmsReq) (*FPSetFileParmsRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPSetFileParmsRes{}, ErrVolLocked } @@ -19,7 +22,7 @@ func (s *AFPService) handleSetFileParms(req *FPSetFileParmsReq) (*FPSetFileParms return &FPSetFileParmsRes{}, NoErr } -func (s *AFPService) handleCreateFile(req *FPCreateFileReq) (*FPCreateFileRes, int32) { +func (s *Service) handleCreateFile(req *FPCreateFileReq) (*FPCreateFileRes, int32) { if s.fs == nil { return &FPCreateFileRes{}, ErrAccessDenied } @@ -30,14 +33,18 @@ func (s *AFPService) handleCreateFile(req *FPCreateFileReq) (*FPCreateFileRes, i if errCode != NoErr { return &FPCreateFileRes{}, errCode } + backend := s.fsForPath(targetPath) + if backend == nil { + return &FPCreateFileRes{}, ErrAccessDenied + } if req.HasFlag(FPCreateFileFlagHardCreate) { - f, err := s.fs.CreateFile(targetPath) + f, err := 
backend.CreateFile(targetPath) if err != nil { return &FPCreateFileRes{}, ErrAccessDenied } f.Close() } else { - f, err := s.fs.OpenFile(targetPath, os.O_CREATE|os.O_EXCL) + f, err := backend.OpenFile(targetPath, os.O_CREATE|os.O_EXCL) if err != nil { if os.IsExist(err) { return &FPCreateFileRes{}, ErrObjectExists @@ -49,7 +56,7 @@ func (s *AFPService) handleCreateFile(req *FPCreateFileReq) (*FPCreateFileRes, i return &FPCreateFileRes{}, NoErr } -func (s *AFPService) handleCopyFile(req *FPCopyFileReq) (*FPCopyFileRes, int32) { +func (s *Service) handleCopyFile(req *FPCopyFileReq) (*FPCopyFileRes, int32) { srcParent, ok := s.resolveDIDPath(req.SrcVolumeID, req.SrcDirID) if !ok { return &FPCopyFileRes{}, ErrObjectNotFound @@ -91,18 +98,23 @@ func (s *AFPService) handleCopyFile(req *FPCopyFileReq) (*FPCopyFileRes, int32) copyName = filepath.Base(srcPath) } dstPath := s.canonicalizePath(filepath.Join(dstParent, copyName)) + srcBackend := s.fsForPath(srcPath) + dstBackend := s.fsForPath(dstPath) + if srcBackend == nil || dstBackend == nil { + return &FPCopyFileRes{}, ErrAccessDenied + } - if _, err := s.fs.Stat(dstPath); err == nil { + if _, err := dstBackend.Stat(dstPath); err == nil { return &FPCopyFileRes{}, ErrObjectExists } - srcFile, err := s.fs.OpenFile(srcPath, os.O_RDONLY) + srcFile, err := srcBackend.OpenFile(srcPath, os.O_RDONLY) if err != nil { return &FPCopyFileRes{}, ErrObjectNotFound } defer srcFile.Close() - dstFile, err := s.fs.CreateFile(dstPath) + dstFile, err := dstBackend.CreateFile(dstPath) if err != nil { return &FPCopyFileRes{}, ErrAccessDenied } @@ -122,6 +134,9 @@ func (s *AFPService) handleCopyFile(req *FPCopyFileReq) (*FPCopyFileRes, int32) break } if readErr != nil { + if errors.Is(readErr, ErrCopySourceReadEOF) { + return &FPCopyFileRes{}, ErrEOFErr + } return &FPCopyFileRes{}, ErrMiscErr } } @@ -130,7 +145,7 @@ func (s *AFPService) handleCopyFile(req *FPCopyFileReq) (*FPCopyFileRes, int32) dstMeta := s.metaFor(req.DstVolumeID) if srcMeta 
!= nil && dstMeta != nil { if err := dstMeta.CopyMetadataFrom(srcMeta, srcPath, dstPath); err != nil { - log.Printf("[AFP] warning: metadata copy failed %q -> %q: %v", srcPath, dstPath, err) + netlog.Debug("[AFP] warning: metadata copy failed %q -> %q: %v", srcPath, dstPath, err) } } diff --git a/service/afp/file_models.go b/service/afp/file_models.go index 44aa0a7..9783a02 100644 --- a/service/afp/file_models.go +++ b/service/afp/file_models.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( diff --git a/service/afp/filedir.go b/service/afp/filedir.go index 0b23812..2d5d79d 100644 --- a/service/afp/filedir.go +++ b/service/afp/filedir.go @@ -1,13 +1,15 @@ +//go:build afp || all + package afp import ( + "github.com/pgodw/omnitalk/netlog" "bytes" "io/fs" - "log" "path/filepath" ) -func (s *AFPService) handleGetFileDirParms(req *FPGetFileDirParmsReq) (*FPGetFileDirParmsRes, int32) { +func (s *Service) handleGetFileDirParms(req *FPGetFileDirParmsReq) (*FPGetFileDirParmsRes, int32) { if req.FileBitmap == 0 && req.DirBitmap == 0 { return &FPGetFileDirParmsRes{}, ErrBitmapErr } @@ -43,7 +45,11 @@ func (s *AFPService) handleGetFileDirParms(req *FPGetFileDirParmsReq) (*FPGetFil if req.Path != "" { infoPath, info, err = s.statPathWithAppleDoubleFallback(targetPath) } else { - info, err = s.fs.Stat(targetPath) + backend := s.fsForPath(targetPath) + if backend == nil { + return emptyGetFileDirParmsRes(req), ErrObjectNotFound + } + info, err = backend.Stat(targetPath) } if err != nil { return emptyGetFileDirParmsRes(req), ErrObjectNotFound @@ -84,7 +90,7 @@ func emptyGetFileDirParmsRes(req *FPGetFileDirParmsReq) *FPGetFileDirParmsRes { } } -func (s *AFPService) handleRename(req *FPRenameReq) (*FPRenameRes, int32) { +func (s *Service) handleRename(req *FPRenameReq) (*FPRenameRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPRenameRes{}, ErrVolLocked } @@ -101,12 +107,16 @@ func (s *AFPService) handleRename(req *FPRenameReq) (*FPRenameRes, int32) { if 
errCode != NoErr { return &FPRenameRes{}, errCode } - _, err := s.fs.Stat(oldPath) + backend := s.fsForPath(oldPath) + if backend == nil { + return &FPRenameRes{}, ErrObjectNotFound + } + _, err := backend.Stat(oldPath) if err != nil { return &FPRenameRes{}, ErrObjectNotFound } - err = s.fs.Rename(oldPath, newPath) + err = backend.Rename(oldPath, newPath) if err != nil { return &FPRenameRes{}, ErrAccessDenied } @@ -115,7 +125,7 @@ func (s *AFPService) handleRename(req *FPRenameReq) (*FPRenameRes, int32) { return &FPRenameRes{}, NoErr } -func (s *AFPService) handleGetDirParms(req *FPGetDirParmsReq) (*FPGetDirParmsRes, int32) { +func (s *Service) handleGetDirParms(req *FPGetDirParmsReq) (*FPGetDirParmsRes, int32) { parentPath, ok := s.getDIDPath(req.VolumeID, req.DirID) if !ok && req.DirID != 0 { return &FPGetDirParmsRes{}, ErrObjectNotFound @@ -130,7 +140,11 @@ func (s *AFPService) handleGetDirParms(req *FPGetDirParmsReq) (*FPGetDirParmsRes } targetPath = resolvedPath } - info, err := s.fs.Stat(targetPath) + backend := s.fsForPath(targetPath) + if backend == nil { + return &FPGetDirParmsRes{}, ErrObjectNotFound + } + info, err := backend.Stat(targetPath) if err != nil || !info.IsDir() { return &FPGetDirParmsRes{}, ErrObjectNotFound } @@ -139,7 +153,7 @@ func (s *AFPService) handleGetDirParms(req *FPGetDirParmsReq) (*FPGetDirParmsRes return &FPGetDirParmsRes{Bitmap: req.Bitmap, Data: resData.Bytes()}, NoErr } -func (s *AFPService) handleGetFileParms(req *FPGetFileParmsReq) (*FPGetFileParmsRes, int32) { +func (s *Service) handleGetFileParms(req *FPGetFileParmsReq) (*FPGetFileParmsRes, int32) { parentPath, ok := s.getDIDPath(req.VolumeID, req.DirID) if !ok && req.DirID != 0 { return &FPGetFileParmsRes{}, ErrObjectNotFound @@ -154,7 +168,11 @@ func (s *AFPService) handleGetFileParms(req *FPGetFileParmsReq) (*FPGetFileParms } targetPath = resolvedPath } - info, err := s.fs.Stat(targetPath) + backend := s.fsForPath(targetPath) + if backend == nil { + return 
&FPGetFileParmsRes{}, ErrObjectNotFound + } + info, err := backend.Stat(targetPath) if err != nil || info.IsDir() { return &FPGetFileParmsRes{}, ErrObjectNotFound } @@ -163,7 +181,7 @@ func (s *AFPService) handleGetFileParms(req *FPGetFileParmsReq) (*FPGetFileParms return &FPGetFileParmsRes{Bitmap: req.Bitmap, Data: resData.Bytes()}, NoErr } -func (s *AFPService) handleSetFileDirParms(req *FPSetFileDirParmsReq) (*FPSetFileDirParmsRes, int32) { +func (s *Service) handleSetFileDirParms(req *FPSetFileDirParmsReq) (*FPSetFileDirParmsRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPSetFileDirParmsRes{}, ErrVolLocked } @@ -175,7 +193,7 @@ func (s *AFPService) handleSetFileDirParms(req *FPSetFileDirParmsReq) (*FPSetFil return &FPSetFileDirParmsRes{}, NoErr } -func (s *AFPService) handleDelete(req *FPDeleteReq) (*FPDeleteRes, int32) { +func (s *Service) handleDelete(req *FPDeleteReq) (*FPDeleteRes, int32) { if s.fs == nil { return &FPDeleteRes{}, ErrAccessDenied } @@ -186,11 +204,15 @@ func (s *AFPService) handleDelete(req *FPDeleteReq) (*FPDeleteRes, int32) { if errCode != NoErr { return &FPDeleteRes{}, errCode } - _, err := s.fs.Stat(targetPath) + backend := s.fsForPath(targetPath) + if backend == nil { + return &FPDeleteRes{}, ErrObjectNotFound + } + _, err := backend.Stat(targetPath) if err != nil { return &FPDeleteRes{}, ErrObjectNotFound } - if err := s.fs.Remove(targetPath); err != nil { + if err := backend.Remove(targetPath); err != nil { return &FPDeleteRes{}, ErrAccessDenied } s.deleteAppleDoubleSidecar(targetPath) @@ -198,7 +220,7 @@ func (s *AFPService) handleDelete(req *FPDeleteReq) (*FPDeleteRes, int32) { return &FPDeleteRes{}, NoErr } -func (s *AFPService) handleMoveAndRename(req *FPMoveAndRenameReq) (*FPMoveAndRenameRes, int32) { +func (s *Service) handleMoveAndRename(req *FPMoveAndRenameReq) (*FPMoveAndRenameRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPMoveAndRenameRes{}, ErrVolLocked } @@ -240,12 +262,16 @@ func (s 
*AFPService) handleMoveAndRename(req *FPMoveAndRenameReq) (*FPMoveAndRen finalName = filepath.Base(srcPath) } dstPath := s.canonicalizePath(filepath.Join(dstParent, finalName)) - _, err := s.fs.Stat(srcPath) + backend := s.fsForPath(srcPath) + if backend == nil { + return &FPMoveAndRenameRes{}, ErrObjectNotFound + } + _, err := backend.Stat(srcPath) if err != nil { return &FPMoveAndRenameRes{}, ErrObjectNotFound } - if err := s.fs.Rename(srcPath, dstPath); err != nil { + if err := backend.Rename(srcPath, dstPath); err != nil { return &FPMoveAndRenameRes{}, ErrAccessDenied } s.moveAppleDoubleSidecar(srcPath, dstPath) @@ -253,7 +279,7 @@ func (s *AFPService) handleMoveAndRename(req *FPMoveAndRenameReq) (*FPMoveAndRen return &FPMoveAndRenameRes{}, NoErr } -func (s *AFPService) handleExchangeFiles(req *FPExchangeFilesReq) (*FPExchangeFilesRes, int32) { +func (s *Service) handleExchangeFiles(req *FPExchangeFilesReq) (*FPExchangeFilesRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPExchangeFilesRes{}, ErrVolLocked } @@ -277,23 +303,27 @@ func (s *AFPService) handleExchangeFiles(req *FPExchangeFilesReq) (*FPExchangeFi // Three-step atomic swap via temp name. 
tmpPath := srcPath + ".__afp_swap__" - if err := s.fs.Rename(srcPath, tmpPath); err != nil { + backend := s.fsForPath(srcPath) + if backend == nil { + return &FPExchangeFilesRes{}, ErrObjectNotFound + } + if err := backend.Rename(srcPath, tmpPath); err != nil { return &FPExchangeFilesRes{}, ErrAccessDenied } s.rebindDIDSubtree(req.VolumeID, srcPath, tmpPath) - if err := s.fs.Rename(dstPath, srcPath); err != nil { + if err := backend.Rename(dstPath, srcPath); err != nil { s.rebindDIDSubtree(req.VolumeID, tmpPath, srcPath) - s.fs.Rename(tmpPath, srcPath) // attempt rollback + backend.Rename(tmpPath, srcPath) // attempt rollback return &FPExchangeFilesRes{}, ErrAccessDenied } s.rebindDIDSubtree(req.VolumeID, dstPath, srcPath) - if err := s.fs.Rename(tmpPath, dstPath); err != nil { + if err := backend.Rename(tmpPath, dstPath); err != nil { return &FPExchangeFilesRes{}, ErrAccessDenied } s.rebindDIDSubtree(req.VolumeID, tmpPath, dstPath) if m := s.metaFor(req.VolumeID); m != nil { if err := m.ExchangeMetadata(srcPath, dstPath); err != nil { - log.Printf("[AFP] warning: metadata exchange failed %q <-> %q: %v", srcPath, dstPath, err) + netlog.Debug("[AFP] warning: metadata exchange failed %q <-> %q: %v", srcPath, dstPath, err) } } return &FPExchangeFilesRes{}, NoErr diff --git a/service/afp/filedir_models.go b/service/afp/filedir_models.go index b9c0512..d3fc363 100644 --- a/service/afp/filedir_models.go +++ b/service/afp/filedir_models.go @@ -1,9 +1,12 @@ +//go:build afp || all + package afp import ( - "bytes" "encoding/binary" "fmt" + + "github.com/pgodw/omnitalk/pkg/binutil" ) type FPGetFileDirParmsReq struct { @@ -47,18 +50,45 @@ func (res *FPGetFileDirParmsRes) String() string { return fmt.Sprintf("FPGetFileDirParmsRes{FileBitmap: %s, DirBitmap: %s, IsFile: %t, DataLen: %d}", formatFileBitmap(res.FileBitmap), formatDirBitmap(res.DirBitmap), res.IsFile, len(res.Data)) } -func (res *FPGetFileDirParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, 
binary.BigEndian, res.FileBitmap) - binary.Write(buf, binary.BigEndian, res.DirBitmap) +func (res *FPGetFileDirParmsRes) WireSize() int { return 6 + len(res.Data) } + +func (res *FPGetFileDirParmsRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.FileBitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU16(b[off:], res.DirBitmap) + if err != nil { + return 0, err + } + off += n + flag := byte(0x80) if res.IsFile { - binary.Write(buf, binary.BigEndian, byte(0x00)) - } else { - binary.Write(buf, binary.BigEndian, byte(0x80)) + flag = 0x00 + } + n, err = binutil.PutU8(b[off:], flag) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU8(b[off:], 0x00) + if err != nil { + return 0, err } - binary.Write(buf, binary.BigEndian, byte(0x00)) - buf.Write(res.Data) - return buf.Bytes() + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + +func (res *FPGetFileDirParmsRes) Marshal() []byte { + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } // FPMoveAndRename - atomically move and/or rename a file or directory (AFP 2.x section 5.1.23). 
@@ -293,13 +323,36 @@ type FPGetDirParmsRes struct { Data []byte } +func (res *FPGetDirParmsRes) WireSize() int { return 4 + len(res.Data) } + +func (res *FPGetDirParmsRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU8(b[off:], 0x80) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU8(b[off:], 0x00) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPGetDirParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - buf.WriteByte(0x80) - buf.WriteByte(0x00) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetDirParmsRes) String() string { @@ -340,13 +393,36 @@ type FPGetFileParmsRes struct { Data []byte } +func (res *FPGetFileParmsRes) WireSize() int { return 4 + len(res.Data) } + +func (res *FPGetFileParmsRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU8(b[off:], 0x00) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU8(b[off:], 0x00) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPGetFileParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - buf.WriteByte(0x00) - buf.WriteByte(0x00) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetFileParmsRes) String() string { diff --git a/service/afp/filedir_models_golden_test.go 
b/service/afp/filedir_models_golden_test.go new file mode 100644 index 0000000..a2dcc0e --- /dev/null +++ b/service/afp/filedir_models_golden_test.go @@ -0,0 +1,64 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "testing" +) + +func TestFPGetFileDirParmsRes_FileMarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetFileDirParmsRes{ + FileBitmap: 0x07FB, + DirBitmap: 0x0DFF, + IsFile: true, + Data: []byte{0xAA, 0xBB, 0xCC}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetfiledirparmsres_file.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetFileDirParmsRes_DirMarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetFileDirParmsRes{ + FileBitmap: 0x07FB, + DirBitmap: 0x0DFF, + IsFile: false, + Data: []byte{0x11, 0x22, 0x33, 0x44}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetfiledirparmsres_dir.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetDirParmsRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetDirParmsRes{ + Bitmap: 0x0DFF, + Data: []byte{0xDE, 0xAD, 0xBE, 0xEF}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetdirparmsres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetFileParmsRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetFileParmsRes{ + Bitmap: 0x07FB, + Data: []byte{0xCA, 0xFE, 0xBA, 0xBE}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetfileparmsres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/filedir_pack.go b/service/afp/filedir_pack.go new file mode 100644 index 0000000..8629769 --- /dev/null +++ b/service/afp/filedir_pack.go @@ -0,0 +1,284 @@ +//go:build afp || all + +package afp + +import ( + 
"bytes" + "io/fs" + "path/filepath" + "time" + + "github.com/pgodw/omnitalk/pkg/binutil" +) + +// toAFPTime converts a Go time.Time to AFP's seconds-since-1904 epoch. +// Times before the epoch clamp to 0; overflow clamps to the max uint32. +func toAFPTime(t time.Time) uint32 { + epoch := time.Date(1904, 1, 1, 0, 0, 0, 0, time.Local) + if t.Before(epoch) { + return 0 + } + secs := t.Sub(epoch).Seconds() + if secs > float64(^uint32(0)) { + return ^uint32(0) + } + return uint32(secs) +} + +// File and directory parameter wire packing. The pack functions here +// resolve Service state (CNID, metadata, FS capabilities) and emit the +// AFP 2.x file/directory parameter block layout used by FPGetFileParms, +// FPGetDirParms, FPGetFileDirParms, and FPEnumerate result entries. + +// calcDirParamsSize returns the total byte size of all fixed fields (including +// variable-name offset pointers) for a directory parameter block with the given bitmap. +func calcDirParamsSize(bitmap uint16) int { + size := 0 + if bitmap&DirBitmapAttributes != 0 { + size += 2 + } + if bitmap&DirBitmapParentDID != 0 { + size += 4 + } + if bitmap&DirBitmapCreateDate != 0 { + size += 4 + } + if bitmap&DirBitmapModDate != 0 { + size += 4 + } + if bitmap&DirBitmapBackupDate != 0 { + size += 4 + } + if bitmap&DirBitmapFinderInfo != 0 { + size += 32 + } + if bitmap&DirBitmapLongName != 0 { + size += 2 // offset pointer + } + if bitmap&DirBitmapShortName != 0 { + size += 2 // offset pointer + } + if bitmap&DirBitmapDirID != 0 { + size += 4 + } + if bitmap&DirBitmapOffspringCount != 0 { + size += 2 + } + if bitmap&DirBitmapOwnerID != 0 { + size += 4 + } + if bitmap&DirBitmapGroupID != 0 { + size += 4 + } + if bitmap&DirBitmapAccessRights != 0 { + size += 4 + } + if bitmap&DirBitmapProDOSInfo != 0 { + size += 6 + } + return size +} + +// calcFileParamsSize returns the total byte size of all fixed fields (including +// variable-name offset pointers) for a file parameter block with the given bitmap. 
+func calcFileParamsSize(bitmap uint16) int { + size := 0 + if bitmap&FileBitmapAttributes != 0 { + size += 2 + } + if bitmap&FileBitmapParentDID != 0 { + size += 4 + } + if bitmap&FileBitmapCreateDate != 0 { + size += 4 + } + if bitmap&FileBitmapModDate != 0 { + size += 4 + } + if bitmap&FileBitmapBackupDate != 0 { + size += 4 + } + if bitmap&FileBitmapFinderInfo != 0 { + size += 32 + } + if bitmap&FileBitmapLongName != 0 { + size += 2 // offset pointer + } + if bitmap&FileBitmapShortName != 0 { + size += 2 // offset pointer + } + if bitmap&FileBitmapFileNum != 0 { + size += 4 + } + if bitmap&FileBitmapDataForkLen != 0 { + size += 4 + } + if bitmap&FileBitmapRsrcForkLen != 0 { + size += 4 + } + if bitmap&FileBitmapProDOSInfo != 0 { + size += 6 + } + return size +} + +func (s *Service) packFileInfo(buf *bytes.Buffer, volumeID uint16, bitmap uint16, parentPath, name string, info fs.FileInfo, isDir bool) { + var varBuf bytes.Buffer + fullPath := filepath.Join(parentPath, name) + name = s.catalogNameForPath(volumeID, fullPath, name) + volFS := s.fsForVolume(volumeID) + + metadata := ForkMetadata{} + if m := s.metaFor(volumeID); m != nil { + if md, err := m.ReadForkMetadata(fullPath); err == nil { + metadata = md + } + } + if !isDir && !hasFinderTypeCreator(metadata.FinderInfo) && s.options.ExtensionMap != nil { + if mapping, ok := s.options.ExtensionMap.Lookup(fullPath); ok { + metadata.FinderInfo = applyExtensionMapping(metadata.FinderInfo, mapping) + } + } + + if isDir { + fixedSize := calcDirParamsSize(bitmap) + + if bitmap&DirBitmapAttributes != 0 { + var dirAttrs uint16 + if volFS != nil && volFS.Capabilities().DirAttributes { + if attrs, err := volFS.DirAttributes(fullPath); err == nil { + dirAttrs = attrs + } + } + binutil.WriteU16(buf, dirAttrs) + } + if bitmap&DirBitmapParentDID != 0 { + // The root directory (DID=2) has a logical parent DID of 1. 
+ var pdir uint32 + thisDID := s.getPathDID(volumeID, fullPath) + if thisDID == CNIDRoot { + pdir = CNIDParentOfRoot + } else { + pdir = s.getPathDID(volumeID, parentPath) + } + binutil.WriteU32(buf, pdir) + } + if bitmap&DirBitmapCreateDate != 0 { + binutil.WriteU32(buf, uint32(toAFPTime(info.ModTime()))) + } + if bitmap&DirBitmapModDate != 0 { + binutil.WriteU32(buf, uint32(toAFPTime(info.ModTime()))) + } + if bitmap&DirBitmapBackupDate != 0 { + binutil.WriteU32(buf, 0) + } + if bitmap&DirBitmapFinderInfo != 0 { + buf.Write(metadata.FinderInfo[:]) + } + if bitmap&DirBitmapLongName != 0 { + offset := uint16(fixedSize + varBuf.Len()) + binutil.WriteU16(buf, offset) + s.writeAFPName(&varBuf, name, volumeID) + } + if bitmap&DirBitmapShortName != 0 { + offset := uint16(fixedSize + varBuf.Len()) + binutil.WriteU16(buf, offset) + s.writeAFPName(&varBuf, name, volumeID) + } + if bitmap&DirBitmapDirID != 0 { + did := s.getPathDID(volumeID, fullPath) + binutil.WriteU32(buf, did) + } + if bitmap&DirBitmapOffspringCount != 0 { + count := uint16(0) + if volFS != nil && volFS.Capabilities().ChildCount { + if cachedCount, err := volFS.ChildCount(fullPath); err == nil { + count = cachedCount + } else if entries, dirErr := volFS.ReadDir(fullPath); dirErr == nil { + for _, e := range entries { + if !s.isMetadataArtifact(e.Name(), e.IsDir(), volumeID) { + count++ + } + } + } + } else if volFS != nil { + if entries, err := volFS.ReadDir(fullPath); err == nil { + for _, e := range entries { + if !s.isMetadataArtifact(e.Name(), e.IsDir(), volumeID) { + count++ + } + } + } + } + binutil.WriteU16(buf, count) + } + if bitmap&DirBitmapOwnerID != 0 { + binutil.WriteU32(buf, 0) + } + if bitmap&DirBitmapGroupID != 0 { + binutil.WriteU32(buf, 0) + } + if bitmap&DirBitmapAccessRights != 0 { + rights := uint32(0x87070707) + if s.volumeIsReadOnly(volumeID) { + // Read-only volumes should advertise read+search rights, not write. 
+ rights = 0x87030303 + } + binutil.WriteU32(buf, rights) + } + if bitmap&DirBitmapProDOSInfo != 0 { + buf.Write(make([]byte, 6)) + } + } else { + fixedSize := calcFileParamsSize(bitmap) + + if bitmap&FileBitmapAttributes != 0 { + attr := uint16(0) + if s.volumeIsReadOnly(volumeID) { + attr |= FileAttrWriteInhibit + } + binutil.WriteU16(buf, attr) + } + if bitmap&FileBitmapParentDID != 0 { + pdir := s.getPathDID(volumeID, parentPath) + binutil.WriteU32(buf, pdir) + } + if bitmap&FileBitmapCreateDate != 0 { + binutil.WriteU32(buf, uint32(toAFPTime(info.ModTime()))) + } + if bitmap&FileBitmapModDate != 0 { + binutil.WriteU32(buf, uint32(toAFPTime(info.ModTime()))) + } + if bitmap&FileBitmapBackupDate != 0 { + binutil.WriteU32(buf, 0) + } + if bitmap&FileBitmapFinderInfo != 0 { + buf.Write(metadata.FinderInfo[:]) + } + if bitmap&FileBitmapLongName != 0 { + offset := uint16(fixedSize + varBuf.Len()) + binutil.WriteU16(buf, offset) + s.writeAFPName(&varBuf, name, volumeID) + } + if bitmap&FileBitmapShortName != 0 { + offset := uint16(fixedSize + varBuf.Len()) + binutil.WriteU16(buf, offset) + s.writeAFPName(&varBuf, name, volumeID) + } + if bitmap&FileBitmapFileNum != 0 { + did := s.getPathDID(volumeID, fullPath) + binutil.WriteU32(buf, did) + } + if bitmap&FileBitmapDataForkLen != 0 { + binutil.WriteU32(buf, uint32(info.Size())) + } + if bitmap&FileBitmapRsrcForkLen != 0 { + binutil.WriteU32(buf, uint32(metadata.ResourceForkLen)) + } + if bitmap&FileBitmapProDOSInfo != 0 { + buf.Write(make([]byte, 6)) + } + } + + buf.Write(varBuf.Bytes()) +} diff --git a/service/afp/fork.go b/service/afp/fork.go index 09b91e8..c4d2a93 100644 --- a/service/afp/fork.go +++ b/service/afp/fork.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -7,13 +9,16 @@ import ( "fmt" "io" "io/fs" - "log" "os" "path/filepath" "syscall" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/appledouble" + "github.com/pgodw/omnitalk/pkg/binutil" ) -func (s *AFPService) 
handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) { +func (s *Service) handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) { parentPath, ok := s.getDIDPath(req.VolumeID, req.DirID) if !ok && req.DirID != 0 { return &FPOpenForkRes{}, ErrObjectNotFound @@ -64,9 +69,13 @@ func (s *AFPService) handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) } } else { // Data fork - f, err := s.fs.OpenFile(targetPath, os.O_RDWR) + backend := s.fsForPath(targetPath) + if backend == nil { + return &FPOpenForkRes{}, ErrObjectNotFound + } + f, err := backend.OpenFile(targetPath, os.O_RDWR) if err != nil && req.AccessMode&0x02 == 0 { - f, err = s.fs.OpenFile(targetPath, os.O_RDONLY) + f, err = backend.OpenFile(targetPath, os.O_RDONLY) } if err != nil { return &FPOpenForkRes{}, ErrObjectNotFound @@ -77,11 +86,7 @@ func (s *AFPService) handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) handle.volID = req.VolumeID handle.filePath = targetPath - s.mu.Lock() - forkID := s.nextFork - s.nextFork++ - s.forks[forkID] = handle - s.mu.Unlock() + forkID := s.forks.register(handle) forkType := "data" if handle.isRsrc { @@ -91,7 +96,7 @@ func (s *AFPService) handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) if req.AccessMode&0x02 == 0 { rwMode = "R/O" } - log.Printf("[AFP] OpenFork forkID=%d %s %s path=%q", forkID, rwMode, forkType, targetPath) + netlog.Debug("[AFP] OpenFork forkID=%d %s %s path=%q", forkID, rwMode, forkType, targetPath) resData := new(bytes.Buffer) s.packFileInfo(resData, req.VolumeID, req.Bitmap, filepath.Dir(targetPath), filepath.Base(targetPath), info, false) @@ -105,23 +110,8 @@ func (s *AFPService) handleOpenFork(req *FPOpenForkReq) (*FPOpenForkRes, int32) return res, NoErr } -func (s *AFPService) handleCloseFork(req *FPCloseForkReq) (*FPCloseForkRes, int32) { - s.mu.Lock() - handle, ok := s.forks[req.OForkRefNum] - if ok { - delete(s.forks, req.OForkRefNum) - if len(s.byteLocks) > 0 { - filtered := s.byteLocks[:0] - for i := range 
s.byteLocks { - if s.byteLocks[i].ownerFork != req.OForkRefNum { - filtered = append(filtered, s.byteLocks[i]) - } - } - s.byteLocks = filtered - } - } - s.mu.Unlock() - +func (s *Service) handleCloseFork(req *FPCloseForkReq) (*FPCloseForkRes, int32) { + handle, ok := s.forks.close(req.OForkRefNum) if !ok { return &FPCloseForkRes{}, ErrParamErr } @@ -131,25 +121,17 @@ func (s *AFPService) handleCloseFork(req *FPCloseForkReq) (*FPCloseForkRes, int3 return &FPCloseForkRes{}, NoErr } -func (s *AFPService) handleFlush(req *FPFlushReq) (*FPFlushRes, int32) { - s.mu.RLock() - var toSync []*forkHandle - for _, h := range s.forks { +func (s *Service) handleFlush(req *FPFlushReq) (*FPFlushRes, int32) { + for _, h := range s.forks.snapshot() { if h.volID == req.VolumeID && h.file != nil { - toSync = append(toSync, h) + h.file.Sync() //nolint:errcheck } } - s.mu.RUnlock() - for _, h := range toSync { - h.file.Sync() //nolint:errcheck - } return &FPFlushRes{}, NoErr } -func (s *AFPService) handleFlushFork(req *FPFlushForkReq) (*FPFlushForkRes, int32) { - s.mu.RLock() - handle, ok := s.forks[req.OForkRefNum] - s.mu.RUnlock() +func (s *Service) handleFlushFork(req *FPFlushForkReq) (*FPFlushForkRes, int32) { + handle, ok := s.forks.get(req.OForkRefNum) if !ok { return &FPFlushForkRes{}, ErrParamErr } @@ -159,11 +141,10 @@ func (s *AFPService) handleFlushFork(req *FPFlushForkReq) (*FPFlushForkRes, int3 return &FPFlushForkRes{}, NoErr } -func (s *AFPService) handleByteRangeLock(req *FPByteRangeLockReq) (*FPByteRangeLockRes, int32) { - s.mu.Lock() - defer s.mu.Unlock() +func (s *Service) handleByteRangeLock(req *FPByteRangeLockReq) (*FPByteRangeLockRes, int32) { + defer s.forks.lock()() - handle, ok := s.forks[req.ForkID] + handle, ok := s.forks.forks[req.ForkID] if !ok { return &FPByteRangeLockRes{}, ErrParamErr } @@ -202,18 +183,18 @@ func (s *AFPService) handleByteRangeLock(req *FPByteRangeLockReq) (*FPByteRangeL lockKey := byteRangeLockKey(handle) if req.Unlock { - for i := 
range s.byteLocks { - lk := s.byteLocks[i] + for i := range s.forks.locks { + lk := s.forks.locks[i] if lk.lockKey == lockKey && lk.ownerFork == req.ForkID && lk.start == offset && lk.length == req.Length { - s.byteLocks = append(s.byteLocks[:i], s.byteLocks[i+1:]...) + s.forks.locks = append(s.forks.locks[:i], s.forks.locks[i+1:]...) return &FPByteRangeLockRes{Offset: offset}, NoErr } } return &FPByteRangeLockRes{}, ErrRangeNotLocked } - for i := range s.byteLocks { - lk := s.byteLocks[i] + for i := range s.forks.locks { + lk := s.forks.locks[i] if lk.lockKey != lockKey { continue } @@ -226,11 +207,11 @@ func (s *AFPService) handleByteRangeLock(req *FPByteRangeLockReq) (*FPByteRangeL return &FPByteRangeLockRes{}, ErrLockErr } - if len(s.byteLocks) >= s.maxLocks { + if len(s.forks.locks) >= s.forks.maxLocks { return &FPByteRangeLockRes{}, ErrNoMoreLocks } - s.byteLocks = append(s.byteLocks, byteRangeLock{ + s.forks.locks = append(s.forks.locks, byteRangeLock{ lockKey: lockKey, ownerFork: req.ForkID, start: offset, @@ -270,10 +251,8 @@ func byteRangeEnd(start, length int64) (int64, bool) { return start + length, false } -func (s *AFPService) handleRead(req *FPReadReq) (*FPReadRes, int32) { - s.mu.RLock() - handle, ok := s.forks[req.ForkID] - s.mu.RUnlock() +func (s *Service) handleRead(req *FPReadReq) (*FPReadRes, int32) { + handle, ok := s.forks.get(req.ForkID) if !ok { return &FPReadRes{}, ErrParamErr @@ -284,22 +263,14 @@ func (s *AFPService) handleRead(req *FPReadReq) (*FPReadRes, int32) { if req.ReqCount == 0 { return &FPReadRes{Data: nil}, NoErr } - - // Per AFP-over-ASP spec: a single FPRead response cannot exceed the ASP - // QuantumSize (atpMaxData x 8 = 4624 bytes) because ATP only has a 3-bit - // sequence number. Clients issue additional FPRead calls at successive - // offsets to read more. 
Clamp here so we never hand more bytes to the - // transport than it can actually deliver - otherwise fragments past seq 7 - // get silently dropped and the client stalls waiting for an ATP retransmit. - const aspQuantumSize = 4624 - if req.ReqCount > aspQuantumSize { - req.ReqCount = aspQuantumSize + if s.maxReadSize > 0 && req.ReqCount > s.maxReadSize { + req.ReqCount = s.maxReadSize } if handle.isRsrc { - log.Printf("[AFP] Read forkID=%d rsrc: rsrcLen=%d req offset=%d count=%d", req.ForkID, handle.rsrcLen, req.Offset, req.ReqCount) + netlog.Debug("[AFP] Read forkID=%d rsrc: rsrcLen=%d req offset=%d count=%d", req.ForkID, handle.rsrcLen, req.Offset, req.ReqCount) if handle.file == nil || handle.rsrcLen == 0 || req.Offset >= handle.rsrcLen { - log.Printf("[AFP] Read forkID=%d rsrc: -> ErrEOFErr (offset past end or empty fork)", req.ForkID) + netlog.Debug("[AFP] Read forkID=%d rsrc: -> ErrEOFErr (offset past end or empty fork)", req.ForkID) return &FPReadRes{}, ErrEOFErr } remaining := handle.rsrcLen - req.Offset @@ -310,18 +281,18 @@ func (s *AFPService) handleRead(req *FPReadReq) (*FPReadRes, int32) { buf := make([]byte, readLen) n, err := handle.file.ReadAt(buf, handle.rsrcOff+req.Offset) if err != nil && err != io.EOF { - log.Printf("[AFP] Read forkID=%d rsrc: ReadAt error: %v", req.ForkID, err) + netlog.Debug("[AFP] Read forkID=%d rsrc: ReadAt error: %v", req.ForkID, err) return &FPReadRes{}, ErrParamErr } if n == 0 { - log.Printf("[AFP] Read forkID=%d rsrc: -> ErrEOFErr (n=0)", req.ForkID) + netlog.Debug("[AFP] Read forkID=%d rsrc: -> ErrEOFErr (n=0)", req.ForkID) return &FPReadRes{}, ErrEOFErr } if int64(n) < int64(req.ReqCount) { - log.Printf("[AFP] Read forkID=%d rsrc: -> %d bytes + ErrEOFErr (partial, requested %d)", req.ForkID, n, req.ReqCount) + netlog.Debug("[AFP] Read forkID=%d rsrc: -> %d bytes + ErrEOFErr (partial, requested %d)", req.ForkID, n, req.ReqCount) return &FPReadRes{Data: buf[:n]}, ErrEOFErr } - log.Printf("[AFP] Read forkID=%d rsrc: -> 
%d bytes NoErr", req.ForkID, n) + netlog.Debug("[AFP] Read forkID=%d rsrc: -> %d bytes NoErr", req.ForkID, n) return &FPReadRes{Data: buf[:n]}, NoErr } @@ -329,29 +300,27 @@ func (s *AFPService) handleRead(req *FPReadReq) (*FPReadRes, int32) { if fi, err := handle.file.Stat(); err == nil { fileSize = fi.Size() } - log.Printf("[AFP] Read forkID=%d data: fileSize=%d req offset=%d count=%d", req.ForkID, fileSize, req.Offset, req.ReqCount) + netlog.Debug("[AFP] Read forkID=%d data: fileSize=%d req offset=%d count=%d", req.ForkID, fileSize, req.Offset, req.ReqCount) buf := make([]byte, req.ReqCount) n, err := handle.file.ReadAt(buf, req.Offset) if err != nil && err != io.EOF { - log.Printf("[AFP] Read forkID=%d data: ReadAt error: %v", req.ForkID, err) + netlog.Debug("[AFP] Read forkID=%d data: ReadAt error: %v", req.ForkID, err) return &FPReadRes{}, ErrParamErr } if n == 0 { - log.Printf("[AFP] Read forkID=%d data: -> ErrEOFErr (n=0)", req.ForkID) + netlog.Debug("[AFP] Read forkID=%d data: -> ErrEOFErr (n=0)", req.ForkID) return &FPReadRes{}, ErrEOFErr } if n < req.ReqCount { - log.Printf("[AFP] Read forkID=%d data: -> %d bytes + ErrEOFErr (partial, requested %d)", req.ForkID, n, req.ReqCount) + netlog.Debug("[AFP] Read forkID=%d data: -> %d bytes + ErrEOFErr (partial, requested %d)", req.ForkID, n, req.ReqCount) return &FPReadRes{Data: buf[:n]}, ErrEOFErr } - log.Printf("[AFP] Read forkID=%d data: -> %d bytes NoErr", req.ForkID, n) + netlog.Debug("[AFP] Read forkID=%d data: -> %d bytes NoErr", req.ForkID, n) return &FPReadRes{Data: buf[:n]}, NoErr } -func (s *AFPService) handleWrite(req *FPWriteReq) (*FPWriteRes, int32) { - s.mu.RLock() - handle, ok := s.forks[req.ForkID] - s.mu.RUnlock() +func (s *Service) handleWrite(req *FPWriteReq) (*FPWriteRes, int32) { + handle, ok := s.forks.get(req.ForkID) if !ok { return &FPWriteRes{}, ErrParamErr @@ -389,19 +358,19 @@ func (s *AFPService) handleWrite(req *FPWriteReq) (*FPWriteRes, int32) { writeAt = offset } - 
log.Printf("[AFP] Write forkID=%d isRsrc=%t writeAt=%d dataLen=%d", req.ForkID, handle.isRsrc, writeAt, len(req.WriteData)) + netlog.Debug("[AFP] Write forkID=%d isRsrc=%t writeAt=%d dataLen=%d", req.ForkID, handle.isRsrc, writeAt, len(req.WriteData)) _, err := handle.file.WriteAt(req.WriteData, writeAt) if err != nil { var errno syscall.Errno if errors.As(err, &errno) && errno == syscall.ENOSPC { - log.Printf("[AFP] Write forkID=%d: -> ErrDFull", req.ForkID) + netlog.Debug("[AFP] Write forkID=%d: -> ErrDFull", req.ForkID) return &FPWriteRes{}, ErrDFull } if errors.Is(err, fs.ErrPermission) { - log.Printf("[AFP] Write forkID=%d: -> ErrAccessDenied: %v", req.ForkID, err) + netlog.Debug("[AFP] Write forkID=%d: -> ErrAccessDenied: %v", req.ForkID, err) return &FPWriteRes{}, ErrAccessDenied } - log.Printf("[AFP] Write forkID=%d: -> ErrParamErr: %v", req.ForkID, err) + netlog.Debug("[AFP] Write forkID=%d: -> ErrParamErr: %v", req.ForkID, err) return &FPWriteRes{}, ErrParamErr } @@ -417,7 +386,7 @@ func (s *AFPService) handleWrite(req *FPWriteReq) (*FPWriteRes, int32) { // Update the resource fork length field in the AppleDouble header. lenBuf := make([]byte, 4) binary.BigEndian.PutUint32(lenBuf, uint32(handle.rsrcLen)) - handle.file.WriteAt(lenBuf, adRsrcLenFileOffset) + handle.file.WriteAt(lenBuf, appledouble.ResourceLenFileOffset) } } @@ -433,119 +402,132 @@ func (s *AFPService) handleWrite(req *FPWriteReq) (*FPWriteRes, int32) { } } } - log.Printf("[AFP] Write forkID=%d: -> LastWritten=%d NoErr", req.ForkID, lastWritten) + netlog.Debug("[AFP] Write forkID=%d: -> LastWritten=%d NoErr", req.ForkID, lastWritten) return &FPWriteRes{LastWritten: lastWritten}, NoErr } -func (s *AFPService) handleGetForkParms(req *FPGetForkParmsReq) (*FPGetForkParmsRes, int32) { - s.mu.RLock() - handle, ok := s.forks[req.OForkRefNum] - s.mu.RUnlock() +// handleGetForkParms returns the same parameter block as FPGetFileDirParms +// for the file backing an open fork (AFP 2.x §5.1.27). 
It must replace +// DataForkLen / RsrcForkLen with the live values tracked on the fork handle: +// in-flight writes may not yet be reflected in Stat or in the AppleDouble +// header. Packing a partial block crashes Finder ("error type 10"). +func (s *Service) handleGetForkParms(req *FPGetForkParmsReq) (*FPGetForkParmsRes, int32) { + handle, ok := s.forks.get(req.OForkRefNum) if !ok { return &FPGetForkParmsRes{}, ErrParamErr } - // Per AFP 2.x section 5.1.27: FPGetForkParms returns file parameters using the - // same File Bitmap as FPGetFileDirParms. Pack the full parameter block - // (ParentDID, LongName, ModDate, etc.) - not just fork lengths - otherwise - // clients that request additional fields will receive a malformed reply - // and mis-parse the response (observed: Finder "error type 10" crash). + if handle.filePath == "" { + // No associated file path (shouldn't happen after OpenFork): fall back + // to the fork-length-only legacy behaviour. + return &FPGetForkParmsRes{Bitmap: req.Bitmap, Data: packForkLengthsOnly(handle, req.Bitmap)}, NoErr + } + + backend := s.fsForPath(handle.filePath) + if backend == nil { + return &FPGetForkParmsRes{}, ErrObjectNotFound + } + info, err := backend.Stat(handle.filePath) + if err != nil { + return &FPGetForkParmsRes{}, ErrObjectNotFound + } resData := new(bytes.Buffer) - if handle.filePath != "" { - info, err := s.fs.Stat(handle.filePath) - if err != nil { - return &FPGetForkParmsRes{}, ErrObjectNotFound - } - parent := filepath.Dir(handle.filePath) - name := filepath.Base(handle.filePath) - s.packFileInfo(resData, handle.volID, req.Bitmap, parent, name, info, false) + parent := filepath.Dir(handle.filePath) + name := filepath.Base(handle.filePath) + s.packFileInfo(resData, handle.volID, req.Bitmap, parent, name, info, false) - // packFileInfo derives DataForkLen from info.Size() and RsrcForkLen - // from the AppleDouble sidecar on disk. 
For an open fork, the - // authoritative length is the one tracked on the handle (writes may - // not yet be flushed to stat / the AD header is updated separately). - // Overwrite the corresponding fields in-place. - body := resData.Bytes() - off := 0 - if req.Bitmap&FileBitmapAttributes != 0 { - off += 2 - } - if req.Bitmap&FileBitmapParentDID != 0 { - off += 4 - } - if req.Bitmap&FileBitmapCreateDate != 0 { - off += 4 - } - if req.Bitmap&FileBitmapModDate != 0 { - off += 4 - } - if req.Bitmap&FileBitmapBackupDate != 0 { - off += 4 - } - if req.Bitmap&FileBitmapFinderInfo != 0 { - off += 32 - } - if req.Bitmap&FileBitmapLongName != 0 { - off += 2 - } - if req.Bitmap&FileBitmapShortName != 0 { - off += 2 - } - if req.Bitmap&FileBitmapFileNum != 0 { - off += 4 - } - if req.Bitmap&FileBitmapDataForkLen != 0 { - var dataLen uint32 - if !handle.isRsrc && handle.file != nil { - if fi, err := handle.file.Stat(); err == nil { - dataLen = uint32(fi.Size()) - } - } else { - dataLen = binary.BigEndian.Uint32(body[off : off+4]) + body := resData.Bytes() + overwriteLiveForkLengths(body, req.Bitmap, handle) + + netlog.Debug("[AFP] GetForkParms forkID=%d isRsrc=%t bitmap=0x%04x bodyLen=%d", + req.OForkRefNum, handle.isRsrc, req.Bitmap, len(body)) + return &FPGetForkParmsRes{Bitmap: req.Bitmap, Data: body}, NoErr +} + +// overwriteLiveForkLengths patches the DataForkLen / RsrcForkLen fields of +// an already-packed FileBitmap parameter block with the authoritative lengths +// read from the open fork handle. Walks the bitmap in declared field order to +// land on the right offset; fields not selected by the bitmap occupy zero +// bytes in the body. 
+func overwriteLiveForkLengths(body []byte, bitmap uint16, handle *forkHandle) { + off := 0 + if bitmap&FileBitmapAttributes != 0 { + off += 2 + } + if bitmap&FileBitmapParentDID != 0 { + off += 4 + } + if bitmap&FileBitmapCreateDate != 0 { + off += 4 + } + if bitmap&FileBitmapModDate != 0 { + off += 4 + } + if bitmap&FileBitmapBackupDate != 0 { + off += 4 + } + if bitmap&FileBitmapFinderInfo != 0 { + off += 32 + } + if bitmap&FileBitmapLongName != 0 { + off += 2 + } + if bitmap&FileBitmapShortName != 0 { + off += 2 + } + if bitmap&FileBitmapFileNum != 0 { + off += 4 + } + if bitmap&FileBitmapDataForkLen != 0 { + var dataLen uint32 + if !handle.isRsrc && handle.file != nil { + if fi, err := handle.file.Stat(); err == nil { + dataLen = uint32(fi.Size()) } - binary.BigEndian.PutUint32(body[off:off+4], dataLen) - off += 4 + } else { + dataLen = binary.BigEndian.Uint32(body[off : off+4]) } - if req.Bitmap&FileBitmapRsrcForkLen != 0 { - var rsrcLen uint32 - if handle.isRsrc { - rsrcLen = uint32(handle.rsrcLen) - } else { - rsrcLen = binary.BigEndian.Uint32(body[off : off+4]) - } - binary.BigEndian.PutUint32(body[off:off+4], rsrcLen) + binary.BigEndian.PutUint32(body[off:off+4], dataLen) + off += 4 + } + if bitmap&FileBitmapRsrcForkLen != 0 { + var rsrcLen uint32 + if handle.isRsrc { + rsrcLen = uint32(handle.rsrcLen) + } else { + rsrcLen = binary.BigEndian.Uint32(body[off : off+4]) } - log.Printf("[AFP] GetForkParms forkID=%d isRsrc=%t bitmap=0x%04x bodyLen=%d", - req.OForkRefNum, handle.isRsrc, req.Bitmap, len(body)) - return &FPGetForkParmsRes{Bitmap: req.Bitmap, Data: body}, NoErr + binary.BigEndian.PutUint32(body[off:off+4], rsrcLen) } +} - // No associated file path (shouldn't happen after OpenFork): fall back to - // the fork-length-only legacy behaviour. - var dataLen, rsrcLen uint32 - if req.Bitmap&FileBitmapDataForkLen != 0 { +// packForkLengthsOnly emits the legacy fork-length-only reply used when the +// fork handle has no associated file path. 
+func packForkLengthsOnly(handle *forkHandle, bitmap uint16) []byte { + resData := new(bytes.Buffer) + if bitmap&FileBitmapDataForkLen != 0 { + var dataLen uint32 if !handle.isRsrc && handle.file != nil { if fi, err := handle.file.Stat(); err == nil { dataLen = uint32(fi.Size()) } } - binary.Write(resData, binary.BigEndian, dataLen) + binutil.WriteU32(resData, dataLen) } - if req.Bitmap&FileBitmapRsrcForkLen != 0 { + if bitmap&FileBitmapRsrcForkLen != 0 { + var rsrcLen uint32 if handle.isRsrc { rsrcLen = uint32(handle.rsrcLen) } - binary.Write(resData, binary.BigEndian, rsrcLen) + binutil.WriteU32(resData, rsrcLen) } - return &FPGetForkParmsRes{Bitmap: req.Bitmap, Data: resData.Bytes()}, NoErr + return resData.Bytes() } -func (s *AFPService) handleSetForkParms(req *FPSetForkParmsReq) (*FPSetForkParmsRes, int32) { - s.mu.RLock() - handle, ok := s.forks[req.OForkRefNum] - s.mu.RUnlock() +func (s *Service) handleSetForkParms(req *FPSetForkParmsReq) (*FPSetForkParmsRes, int32) { + handle, ok := s.forks.get(req.OForkRefNum) if !ok { - log.Printf("[AFP] FPSetForkParms: unknown forkID=%d", req.OForkRefNum) + netlog.Debug("[AFP] FPSetForkParms: unknown forkID=%d", req.OForkRefNum) return &FPSetForkParmsRes{}, ErrParamErr } if s.volumeIsReadOnly(handle.volID) { @@ -570,17 +552,17 @@ func (s *AFPService) handleSetForkParms(req *FPSetForkParmsReq) (*FPSetForkParms return &FPSetForkParmsRes{}, ErrParamErr } if err := handle.file.Truncate(newLen); err != nil { - log.Printf("[AFP] FPSetForkParms: truncate data fork to %d: %v", newLen, err) + netlog.Debug("[AFP] FPSetForkParms: truncate data fork to %d: %v", newLen, err) return &FPSetForkParmsRes{}, ErrMiscErr } - log.Printf("[AFP] FPSetForkParms forkID=%d data newLen=%d", req.OForkRefNum, newLen) + netlog.Debug("[AFP] FPSetForkParms forkID=%d data newLen=%d", req.OForkRefNum, newLen) return &FPSetForkParmsRes{}, NoErr } // Resource fork: truncate the AppleDouble sidecar and update the entry's length field. 
if handle.file == nil { // Empty-rsrc handle (no sidecar was opened). Accept no-op if newLen==0. - log.Printf("[AFP] FPSetForkParms forkID=%d rsrc (empty handle) newLen=%d", req.OForkRefNum, newLen) + netlog.Debug("[AFP] FPSetForkParms forkID=%d rsrc (empty handle) newLen=%d", req.OForkRefNum, newLen) if newLen == 0 { handle.rsrcLen = 0 return &FPSetForkParmsRes{}, NoErr @@ -597,10 +579,21 @@ func (s *AFPService) handleSetForkParms(req *FPSetForkParmsReq) (*FPSetForkParms Length: handle.rsrcLen, LengthFieldOffset: lenFieldAt, }, newLen); err != nil { - log.Printf("[AFP] FPSetForkParms: truncate rsrc fork to %d: %v", newLen, err) + netlog.Debug("[AFP] FPSetForkParms: truncate rsrc fork to %d: %v", newLen, err) return &FPSetForkParmsRes{}, ErrMiscErr } handle.rsrcLen = newLen - log.Printf("[AFP] FPSetForkParms forkID=%d rsrc newLen=%d rsrcOff=%d lenFieldAt=%d", req.OForkRefNum, newLen, handle.rsrcOff, lenFieldAt) + netlog.Debug("[AFP] FPSetForkParms forkID=%d rsrc newLen=%d rsrcOff=%d lenFieldAt=%d", req.OForkRefNum, newLen, handle.rsrcOff, lenFieldAt) return &FPSetForkParmsRes{}, NoErr } + +// initForkMetadata picks between an injected single ForkMetadataBackend +// (used by tests) and the per-volume map populated by installAppleDoubleBackend +// during volume construction. +func (s *Service) initForkMetadata(options Options) { + if options.ForkMetadataBackend != nil { + s.meta = options.ForkMetadataBackend + return + } + s.metas = make(map[uint16]ForkMetadataBackend) +} diff --git a/service/afp/fork_metadata.go b/service/afp/fork_metadata.go new file mode 100644 index 0000000..3a7c894 --- /dev/null +++ b/service/afp/fork_metadata.go @@ -0,0 +1,59 @@ +//go:build afp || all + +package afp + +import "io/fs" + +// ForkMetadata contains AFP metadata that may be stored outside the data fork. +type ForkMetadata struct { + FinderInfo [32]byte + ResourceForkLen int64 + HasResourceFork bool +} + +// ResourceForkInfo describes where a resource fork lives in backend storage. 
+type ResourceForkInfo struct { + Offset int64 + Length int64 + LengthFieldOffset int64 +} + +type AppleDoubleMode string + +const ( + AppleDoubleModeModern AppleDoubleMode = "netatalk modern" + AppleDoubleModeLegacy AppleDoubleMode = "netatalk legacy" +) + +// ForkMetadataBackend abstracts where AFP metadata and resource forks are stored. +// The default implementation is AppleDoubleBackend, but other backends can map +// to alternate streams, xattrs, or different sidecar layouts. +type ForkMetadataBackend interface { + StatWithMetadataFallback(path string) (string, fs.FileInfo, error) + ReadForkMetadata(path string) (ForkMetadata, error) + WriteFinderInfo(path string, finderInfo [32]byte) error + OpenResourceFork(path string, writable bool) (File, ResourceForkInfo, error) + TruncateResourceFork(file File, info ResourceForkInfo, newLen int64) error + MoveMetadata(oldpath, newpath string) error + DeleteMetadata(path string) error + CopyMetadata(srcPath, dstPath string) error + CopyMetadataFrom(source ForkMetadataBackend, srcPath, dstPath string) error + ExchangeMetadata(pathA, pathB string) error + IsMetadataArtifact(name string, isDir bool) bool + + // MetadataPath returns the AppleDouble sidecar path for a host file path. + MetadataPath(path string) string + + // IconFileName returns the host filesystem name for the Mac "Icon\r" file, + // accounting for decomposed filenames and AppleDouble mode. + // In legacy mode this is "Icon_"; otherwise "Icon0x0D" (decomposed) or + // "Icon\r" (literal). + IconFileName() string +} + +// CommentBackend can read/write/delete Finder comments stored in sidecar metadata. 
+type CommentBackend interface { + ReadComment(path string) ([]byte, bool) + WriteComment(path string, comment []byte) error + RemoveComment(path string) error +} diff --git a/service/afp/fork_models.go b/service/afp/fork_models.go index 5123fc5..c8db4a6 100644 --- a/service/afp/fork_models.go +++ b/service/afp/fork_models.go @@ -1,9 +1,12 @@ +//go:build afp || all + package afp import ( - "bytes" "encoding/binary" "fmt" + + "github.com/pgodw/omnitalk/pkg/binutil" ) // Fork type constants for FPOpenFork. @@ -54,12 +57,31 @@ func (res *FPOpenForkRes) String() string { return fmt.Sprintf("FPOpenForkRes{ForkID: %d, Bitmap: %s, DataLen: %d}", res.ForkID, formatFileBitmap(res.Bitmap), len(res.Data)) } +func (res *FPOpenForkRes) WireSize() int { return 4 + len(res.Data) } + +func (res *FPOpenForkRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + n, err = binutil.PutU16(b[off:], res.ForkID) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPOpenForkRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - binary.Write(buf, binary.BigEndian, res.ForkID) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } type FPReadReq struct { @@ -127,10 +149,16 @@ type FPWriteRes struct { LastWritten int64 } +func (res *FPWriteRes) WireSize() int { return 4 } + +func (res *FPWriteRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU32(b, uint32(int32(res.LastWritten))) +} + func (res *FPWriteRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, uint32(int32(res.LastWritten))) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPWriteRes) 
String() string { @@ -230,10 +258,16 @@ type FPByteRangeLockRes struct { Offset int64 } +func (res *FPByteRangeLockRes) WireSize() int { return 4 } + +func (res *FPByteRangeLockRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU32(b, uint32(int32(res.Offset))) +} + func (res *FPByteRangeLockRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, uint32(int32(res.Offset))) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPByteRangeLockRes) String() string { @@ -265,11 +299,26 @@ type FPGetForkParmsRes struct { Data []byte } +func (res *FPGetForkParmsRes) WireSize() int { return 2 + len(res.Data) } + +func (res *FPGetForkParmsRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPGetForkParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetForkParmsRes) String() string { diff --git a/service/afp/fork_models_golden_test.go b/service/afp/fork_models_golden_test.go new file mode 100644 index 0000000..f4fb4f4 --- /dev/null +++ b/service/afp/fork_models_golden_test.go @@ -0,0 +1,55 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "testing" +) + +func TestFPOpenForkRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPOpenForkRes{ + Bitmap: 0x07FB, + ForkID: 0x1234, + Data: []byte{0xDE, 0xAD, 0xBE, 0xEF}, + } + got := res.Marshal() + want := goldenBytes(t, "fpopenforkres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func 
TestFPWriteRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPWriteRes{LastWritten: 0x12345678} + got := res.Marshal() + want := goldenBytes(t, "fpwriteres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPByteRangeLockRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPByteRangeLockRes{Offset: 0x0BADF00D} + got := res.Marshal() + want := goldenBytes(t, "fpbyterangelockres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +func TestFPGetForkParmsRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetForkParmsRes{ + Bitmap: 0x0600, + Data: []byte{0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x20, 0x00}, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetforkparmsres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/fork_state.go b/service/afp/fork_state.go new file mode 100644 index 0000000..3a70efa --- /dev/null +++ b/service/afp/fork_state.go @@ -0,0 +1,91 @@ +//go:build afp || all + +package afp + +import "sync" + +// forkState owns the open-fork table, the next-fork allocator, and the +// byte-range lock list. AFP fork operations (FPOpenFork / FPCloseFork / +// FPRead / FPWrite / FPByteRangeLock / FPGetForkParms / FPSetForkParms / +// FPFlush*) hammer this state on every active session, so it lives behind +// its own RWMutex to keep auth, desktop, and volume traffic off the same +// contention domain. +type forkState struct { + mu sync.RWMutex + forks map[uint16]*forkHandle + nextFork uint16 + locks []byteRangeLock + maxLocks int +} + +func newForkState(maxLocks int) forkState { + return forkState{ + forks: make(map[uint16]*forkHandle), + nextFork: 1, + locks: make([]byteRangeLock, 0), + maxLocks: maxLocks, + } +} + +// register installs handle and returns the new fork id. 
+func (f *forkState) register(handle *forkHandle) uint16 { + f.mu.Lock() + defer f.mu.Unlock() + id := f.nextFork + f.nextFork++ + f.forks[id] = handle + return id +} + +// get returns the handle bound to id (or nil + false). Read-locked, suitable +// for the hot Read/Write path. +func (f *forkState) get(id uint16) (*forkHandle, bool) { + f.mu.RLock() + defer f.mu.RUnlock() + h, ok := f.forks[id] + return h, ok +} + +// close drops the fork id, evicts every byte-range lock owned by it, and +// returns the previously-bound handle. The caller is responsible for any +// I/O cleanup (file.Close) outside the lock. +func (f *forkState) close(id uint16) (*forkHandle, bool) { + f.mu.Lock() + defer f.mu.Unlock() + h, ok := f.forks[id] + if !ok { + return nil, false + } + delete(f.forks, id) + if len(f.locks) > 0 { + filtered := f.locks[:0] + for i := range f.locks { + if f.locks[i].ownerFork != id { + filtered = append(filtered, f.locks[i]) + } + } + f.locks = filtered + } + return h, true +} + +// snapshot returns a copy of every currently-open fork handle. Used by +// FPFlush so the actual file.Sync calls can run without holding the fork +// lock. +func (f *forkState) snapshot() []*forkHandle { + f.mu.RLock() + defer f.mu.RUnlock() + out := make([]*forkHandle, 0, len(f.forks)) + for _, h := range f.forks { + out = append(out, h) + } + return out +} + +// lock acquires the write lock and returns an unlock func. The byte-range +// lock state machine in fork.go takes the write lock for the duration of +// its handle validation + lock-list scan + insertion. +func (f *forkState) lock() func() { + f.mu.Lock() + return f.mu.Unlock +} diff --git a/service/afp/fs.go b/service/afp/fs.go index aa7e966..68401d3 100644 --- a/service/afp/fs.go +++ b/service/afp/fs.go @@ -1,29 +1,56 @@ +//go:build afp || all + package afp import ( + "fmt" "io/fs" + "maps" + "slices" + "sync" ) -// ForkMetadata contains AFP metadata that may be stored outside the data fork. 
-type ForkMetadata struct { - FinderInfo [32]byte - ResourceForkLen int64 - HasResourceFork bool -} +// FileSystemFactory constructs a FileSystem from a normalized +// VolumeConfig. Backends register themselves with RegisterFS during +// package init(). +type FileSystemFactory func(VolumeConfig) (FileSystem, error) + +var ( + fsRegistryMu sync.RWMutex + fsRegistry = map[string]FileSystemFactory{} +) -// ResourceForkInfo describes where a resource fork lives in backend storage. -type ResourceForkInfo struct { - Offset int64 - Length int64 - LengthFieldOffset int64 +// RegisterFS associates an FSType name with its factory. It is safe to +// call from package init() blocks; a duplicate name panics so missing +// build tags surface immediately rather than silently overriding the +// default backend. +func RegisterFS(name string, f FileSystemFactory) { + fsRegistryMu.Lock() + defer fsRegistryMu.Unlock() + if _, exists := fsRegistry[name]; exists { + panic(fmt.Sprintf("afp: FileSystem %q already registered", name)) + } + fsRegistry[name] = f } -type AppleDoubleMode string +// NewFS dispatches to the factory registered for cfg.FSType. The +// returned error includes the list of registered names when no +// factory matches. 
+func NewFS(cfg VolumeConfig) (FileSystem, error) { + fsRegistryMu.RLock() + f, ok := fsRegistry[cfg.FSType] + fsRegistryMu.RUnlock() + if !ok { + return nil, fmt.Errorf("afp: no FileSystem registered for fs_type %q (registered: %v)", cfg.FSType, registeredFSNames()) + } + return f(cfg) +} -const ( - AppleDoubleModeModern AppleDoubleMode = "netatalk modern" - AppleDoubleModeLegacy AppleDoubleMode = "netatalk legacy" -) +func registeredFSNames() []string { + fsRegistryMu.RLock() + defer fsRegistryMu.RUnlock() + return slices.Sorted(maps.Keys(fsRegistry)) +} type FileSystem interface { ReadDir(path string) ([]fs.DirEntry, error) @@ -34,39 +61,23 @@ type FileSystem interface { OpenFile(path string, flag int) (File, error) Remove(path string) error Rename(oldpath, newpath string) error + Capabilities() FileSystemCapabilities + CatSearch(volumeRoot string, query string, reqMatches int32, cursor [16]byte) ([]string, [16]byte, int32) + ChildCount(path string) (uint16, error) + ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) + DirAttributes(path string) (uint16, error) + IsReadOnly(path string) (bool, error) + SupportsCatSearch(path string) (bool, error) } -// ForkMetadataBackend abstracts where AFP metadata and resource forks are stored. -// The default implementation is AppleDoubleBackend, but other backends can map -// to alternate streams, xattrs, or different sidecar layouts. 
-type ForkMetadataBackend interface { - StatWithMetadataFallback(path string) (string, fs.FileInfo, error) - ReadForkMetadata(path string) (ForkMetadata, error) - WriteFinderInfo(path string, finderInfo [32]byte) error - OpenResourceFork(path string, writable bool) (File, ResourceForkInfo, error) - TruncateResourceFork(file File, info ResourceForkInfo, newLen int64) error - MoveMetadata(oldpath, newpath string) error - DeleteMetadata(path string) error - CopyMetadata(srcPath, dstPath string) error - CopyMetadataFrom(source ForkMetadataBackend, srcPath, dstPath string) error - ExchangeMetadata(pathA, pathB string) error - IsMetadataArtifact(name string, isDir bool) bool - - // MetadataPath returns the AppleDouble sidecar path for a host file path. - MetadataPath(path string) string - - // IconFileName returns the host filesystem name for the Mac "Icon\r" file, - // accounting for decomposed filenames and AppleDouble mode. - // In legacy mode this is "Icon_"; otherwise "Icon0x0D" (decomposed) or - // "Icon\r" (literal). - IconFileName() string -} - -// CommentBackend can read/write/delete Finder comments stored in sidecar metadata. -type CommentBackend interface { - ReadComment(path string) ([]byte, bool) - WriteComment(path string, comment []byte) error - RemoveComment(path string) error +// FileSystemCapabilities describes optional AFP behaviors a FileSystem +// implementation supports. 
+type FileSystemCapabilities struct { + CatSearch bool + ChildCount bool + ReadDirRange bool + DirAttributes bool + ReadOnlyState bool } type File interface { diff --git a/service/afp/getfiledirparms_error_response_test.go b/service/afp/getfiledirparms_error_response_test.go index 906da6c..57e6ec6 100644 --- a/service/afp/getfiledirparms_error_response_test.go +++ b/service/afp/getfiledirparms_error_response_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -7,7 +9,7 @@ import ( func TestHandleGetFileDirParms_ObjectNotFoundReturnsStructuredResponse(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) req := &FPGetFileDirParmsReq{ VolumeID: 1, @@ -43,7 +45,7 @@ func TestHandleGetFileDirParms_ObjectNotFoundReturnsStructuredResponse(t *testin func TestHandleGetFileDirParms_ObjectNotFoundDirOnlyRequestUsesDirFlag(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) req := &FPGetFileDirParmsReq{ VolumeID: 1, diff --git a/service/afp/getfiledirparms_validation_test.go b/service/afp/getfiledirparms_validation_test.go index 314de65..c4a0351 100644 --- a/service/afp/getfiledirparms_validation_test.go +++ b/service/afp/getfiledirparms_validation_test.go @@ -1,10 +1,12 @@ +//go:build afp || all + package afp import "testing" func TestHandleGetFileDirParms_RejectsZeroBitmaps(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetFileDirParms(&FPGetFileDirParmsReq{ VolumeID: 1, @@ 
-24,7 +26,7 @@ func TestHandleGetFileDirParms_RejectsZeroBitmaps(t *testing.T) { func TestHandleGetFileDirParms_RejectsUnsupportedBitmapBits(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) // Bit 14 is not supported by our packer and must not be accepted. unsupported := uint16(1 << 14) @@ -46,7 +48,7 @@ func TestHandleGetFileDirParms_RejectsUnsupportedBitmapBits(t *testing.T) { func TestHandleGetFileDirParms_RejectsInvalidPathTypeWhenPathPresent(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetFileDirParms(&FPGetFileDirParmsReq{ VolumeID: 1, diff --git a/service/afp/icon_resourcefork.go b/service/afp/icon_resourcefork.go index b88d23d..46bb470 100644 --- a/service/afp/icon_resourcefork.go +++ b/service/afp/icon_resourcefork.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp // BNDL/FREF/ICN# extraction on top of the generic resource-fork parser in diff --git a/service/afp/info.go b/service/afp/info.go index a4580b0..5beea0d 100644 --- a/service/afp/info.go +++ b/service/afp/info.go @@ -1,8 +1,11 @@ +//go:build afp || all + package afp import ( "bytes" - "encoding/binary" + + "github.com/pgodw/omnitalk/pkg/binutil" ) // BuildServerInfo constructs the payload for an AFP FPGetSrvrInfo or ASP GetStatus reply. @@ -53,14 +56,14 @@ func BuildServerInfo(serverName string) []byte { // Write Offsets // For FPGetSrvrInfo, the layout requires exactly 4 offsets. 
- binary.Write(buf, binary.BigEndian, uint16(machineOffset)) - binary.Write(buf, binary.BigEndian, uint16(versionsOffset)) - binary.Write(buf, binary.BigEndian, uint16(uamsOffset)) - binary.Write(buf, binary.BigEndian, uint16(iconOffset)) + binutil.WriteU16(buf, uint16(machineOffset)) + binutil.WriteU16(buf, uint16(versionsOffset)) + binutil.WriteU16(buf, uint16(uamsOffset)) + binutil.WriteU16(buf, uint16(iconOffset)) // Write Flags flags := uint16(0x0001 | 0x0002) // Supports CopyFile, Supports Choose Message (example flags) - binary.Write(buf, binary.BigEndian, flags) + binutil.WriteU16(buf, flags) // Write Server Name (Pascal String) buf.WriteByte(byte(len(serverName))) diff --git a/service/afp/info_test.go b/service/afp/info_test.go index bccb959..3116cb2 100644 --- a/service/afp/info_test.go +++ b/service/afp/info_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( diff --git a/service/afp/loadconfig.go b/service/afp/loadconfig.go new file mode 100644 index 0000000..247ac59 --- /dev/null +++ b/service/afp/loadconfig.go @@ -0,0 +1,46 @@ +//go:build afp || all + +package afp + +import ( + "fmt" + "path/filepath" + "strings" +) + +// ParseAppleDoubleMode parses an "appledouble_mode" config value. +func ParseAppleDoubleMode(value string) (AppleDoubleMode, error) { + switch strings.ToLower(strings.TrimSpace(value)) { + case "", "modern", string(AppleDoubleModeModern): + return AppleDoubleModeModern, nil + case "legacy", string(AppleDoubleModeLegacy): + return AppleDoubleModeLegacy, nil + default: + return "", fmt.Errorf("appledouble_mode must be modern or legacy, got %q", value) + } +} + +// DefaultMacGardenVolumePath derives a filesystem-safe default path for a +// MacGarden-backed volume that did not specify one. 
+func DefaultMacGardenVolumePath(name string) string { + safe := strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + return r + case r >= 'A' && r <= 'Z': + return r + case r >= '0' && r <= '9': + return r + case r == '-' || r == '_': + return r + case r == ' ': + return '_' + default: + return -1 + } + }, strings.TrimSpace(name)) + if safe == "" { + safe = "MacGarden" + } + return filepath.Join(".macgarden", safe) +} diff --git a/service/afp/local_fs.go b/service/afp/local_fs.go index 16cd1bd..f457620 100644 --- a/service/afp/local_fs.go +++ b/service/afp/local_fs.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -7,6 +9,12 @@ import ( type LocalFileSystem struct{} +func init() { + RegisterFS(FSTypeLocalFS, func(cfg VolumeConfig) (FileSystem, error) { + return &LocalFileSystem{}, nil + }) +} + // LocalFileSystem expects already-converted UTF-8 host paths from AFP service logic. func (l *LocalFileSystem) ReadDir(path string) ([]fs.DirEntry, error) { @@ -48,3 +56,42 @@ func (l *LocalFileSystem) Remove(path string) error { func (l *LocalFileSystem) Rename(oldpath, newpath string) error { return os.Rename(oldpath, newpath) } + +func (l *LocalFileSystem) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{ + ChildCount: true, + DirAttributes: true, + ReadOnlyState: true, + } +} + +func (l *LocalFileSystem) CatSearch(_ string, _ string, _ int32, cursor [16]byte) ([]string, [16]byte, int32) { + return nil, cursor, ErrCallNotSupported +} + +func (l *LocalFileSystem) ChildCount(path string) (uint16, error) { + entries, err := os.ReadDir(path) + if err != nil { + return 0, err + } + if len(entries) > 0xffff { + return 0xffff, nil + } + return uint16(len(entries)), nil +} + +func (l *LocalFileSystem) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + return nil, 0, newNotSupported("ReadDirRange") +} + +func (l *LocalFileSystem) DirAttributes(_ string) (uint16, error) { + 
return 0, nil +} + +func (l *LocalFileSystem) IsReadOnly(_ string) (bool, error) { + return false, nil +} + +func (l *LocalFileSystem) SupportsCatSearch(_ string) (bool, error) { + return false, nil +} diff --git a/service/afp/logging.go b/service/afp/logging.go new file mode 100644 index 0000000..7de473c --- /dev/null +++ b/service/afp/logging.go @@ -0,0 +1,86 @@ +//go:build afp || all + +package afp + +import ( + "github.com/pgodw/omnitalk/netlog" + "fmt" +) + +func (s *Service) logPacket(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + if s.dumper != nil { + s.dumper.LogPacket(msg) + } +} + +func (s *Service) logResolvedPaths(req Request) { + switch r := req.(type) { + case *FPOpenDirReq: + s.logResolvedPath("FPOpenDir", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPEnumerateReq: + s.logResolvedPath("FPEnumerate", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPGetFileDirParmsReq: + s.logResolvedPath("FPGetFileDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPGetDirParmsReq: + s.logResolvedPath("FPGetDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPGetFileParmsReq: + s.logResolvedPath("FPGetFileParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPOpenForkReq: + s.logResolvedPath("FPOpenFork", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPCreateFileReq: + s.logResolvedPath("FPCreateFile", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPCreateDirReq: + s.logResolvedPath("FPCreateDir", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPDeleteReq: + s.logResolvedPath("FPDelete", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPSetDirParmsReq: + s.logResolvedPath("FPSetDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPSetFileParmsReq: + s.logResolvedPath("FPSetFileParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPSetFileDirParmsReq: + s.logResolvedPath("FPSetFileDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) + case *FPRenameReq: + s.logResolvedPath("FPRename old", 
r.VolumeID, r.DirID, r.PathType, r.Name) + s.logResolvedPath("FPRename new", r.VolumeID, r.DirID, r.NewPathType, r.NewName) + case *FPMoveAndRenameReq: + s.logResolvedPath("FPMoveAndRename src", r.VolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) + s.logResolvedPath("FPMoveAndRename dstDir", r.VolumeID, r.DstDirID, r.DstPathType, r.DstDirName) + case *FPExchangeFilesReq: + s.logResolvedPath("FPExchangeFiles src", r.VolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) + s.logResolvedPath("FPExchangeFiles dst", r.VolumeID, r.DstDirID, r.DstPathType, r.DstName) + case *FPCopyFileReq: + s.logResolvedPath("FPCopyFile src", r.SrcVolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) + s.logResolvedPath("FPCopyFile dstDir", r.DstVolumeID, r.DstDirID, r.DstPathType, r.DstDirName) + case *FPAddAPPLReq: + s.logResolvedPathFromDTRef("FPAddAPPL", r.DTRefNum, r.DirID, r.PathType, r.Path) + case *FPRemoveAPPLReq: + s.logResolvedPathFromDTRef("FPRemoveAPPL", r.DTRefNum, r.DirID, r.PathType, r.Path) + case *FPAddCommentReq: + s.logResolvedPathFromDTRef("FPAddComment", r.DTRefNum, r.DirID, r.PathType, r.Path) + case *FPRemoveCommentReq: + s.logResolvedPathFromDTRef("FPRemoveComment", r.DTRefNum, r.DirID, r.PathType, r.Path) + case *FPGetCommentReq: + s.logResolvedPathFromDTRef("FPGetComment", r.DTRefNum, r.DirID, r.PathType, r.Path) + case *FPCatSearchReq: + s.logResolvedPath("FPCatSearch", r.VolumeID, CNIDRoot, PathTypeLongNames, "") + } +} + +func (s *Service) logResolvedPath(op string, volumeID uint16, dirID uint32, pathType uint8, rawPath string) { + resolved, errCode := s.resolveVolumePath(volumeID, dirID, rawPath, pathType) + if errCode == NoErr { + netlog.Debug("[AFP][Path] %s vol=%d dirID=%d pathType=%d raw=%q resolved=%q", op, volumeID, dirID, pathType, rawPath, resolved) + return + } + netlog.Debug("[AFP][Path] %s vol=%d dirID=%d pathType=%d raw=%q unresolved err=%d", op, volumeID, dirID, pathType, rawPath, errCode) +} + +func (s *Service) logResolvedPathFromDTRef(op string, dtRefNum 
uint16, dirID uint32, pathType uint8, rawPath string) { + volID, ok := s.desktop.volumeOf(dtRefNum) + if !ok { + netlog.Debug("[AFP][Path] %s dtRef=%d dirID=%d pathType=%d raw=%q unresolved err=%d", op, dtRefNum, dirID, pathType, rawPath, ErrParamErr) + return + } + s.logResolvedPath(op, volID, dirID, pathType, rawPath) +} diff --git a/service/afp/macgarden_fs_stub.go b/service/afp/macgarden_fs_stub.go new file mode 100644 index 0000000..cb78021 --- /dev/null +++ b/service/afp/macgarden_fs_stub.go @@ -0,0 +1,17 @@ +//go:build afp && !macgarden + +package afp + +import ( + "errors" +) + +// ErrMacGardenDisabled is returned when a volume is configured with +// fs_type = "macgarden" in a binary built without the "macgarden" build tag. +var ErrMacGardenDisabled = errors.New("macgarden backend not built; rebuild with -tags macgarden") + +func init() { + RegisterFS(FSTypeMacGarden, func(_ VolumeConfig) (FileSystem, error) { + return nil, ErrMacGardenDisabled + }) +} diff --git a/service/afp/metadata.go b/service/afp/metadata.go new file mode 100644 index 0000000..ee0c883 --- /dev/null +++ b/service/afp/metadata.go @@ -0,0 +1,108 @@ +//go:build afp || all + +package afp + +import ( + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/cnid" +) + +// AppleDouble sidecar / hidden-name / icon canonicalisation helpers. +// These bridge the AFP-visible filesystem (which never sees ._sidecar +// files, .AppleDouble folders, or per-volume CNID databases) and the +// host filesystem where those artefacts physically live. + +func (s *Service) statPathWithAppleDoubleFallback(path string) (string, fs.FileInfo, error) { + m := s.metaForPath(path) + if m == nil { + return path, nil, os.ErrNotExist + } + return m.StatWithMetadataFallback(path) +} + +// iconFileNameFor returns the host filesystem name for the Mac "Icon\r" file +// for the given volume, respecting its AppleDouble mode and decomposed filename settings. 
+func (s *Service) iconFileNameFor(volID uint16) string { + if m := s.metaFor(volID); m != nil { + return m.IconFileName() + } + if s.options.DecomposedFilenames { + return "Icon0x0D" + } + return "Icon\r" +} + +// canonicalizePath remaps any Icon\r variant in path to the canonical host +// name for the configured backend (e.g. Icon0x0D→Icon_ in legacy mode). +// This is applied during path resolution so both reads and writes use the +// correct on-disk name without duplicating the alias logic in every handler. +func (s *Service) canonicalizePath(path string) string { + m := s.metaForPath(path) + if m == nil { + return path + } + base := filepath.Base(path) + canonical := m.IconFileName() + if isIconFile(base) && base != canonical { + return filepath.Join(filepath.Dir(path), canonical) + } + return path +} + +// alwaysHiddenNames lists directory and file names that are always hidden from +// AFP clients regardless of volume backend or AppleDouble mode. Names are +// matched case-insensitively. +var alwaysHiddenNames = []string{ + ".appledesktop", + ".appledouble", +} + +func (s *Service) isMetadataArtifact(name string, isDir bool, volID uint16) bool { + if !isDir && strings.EqualFold(name, cnid.SQLiteFilename) { + return true + } + for _, hidden := range alwaysHiddenNames { + if strings.EqualFold(name, hidden) { + return true + } + } + if m := s.metaFor(volID); m != nil { + return m.IsMetadataArtifact(name, isDir) + } + return strings.HasPrefix(name, "._") +} + +// moveAppleDoubleSidecar renames an AppleDouble sidecar (._name) alongside a +// primary file rename/move. This is best-effort: missing sidecars are silently +// ignored, and unexpected errors are logged but not returned to the caller so +// that a sidecar failure never causes the already-completed primary operation +// to report an error to the client. 
+func (s *Service) moveAppleDoubleSidecar(oldPath, newPath string) error { + m := s.metaForPath(oldPath) + if m == nil { + return nil + } + if err := m.MoveMetadata(oldPath, newPath); err != nil { + netlog.Debug("[AFP] warning: could not move metadata %s → %s: %v", oldPath, newPath, err) + } + return nil +} + +// deleteAppleDoubleSidecar removes a file's AppleDouble sidecar. This is +// best-effort: missing sidecars are silently ignored, and unexpected errors +// are logged but not returned to the caller. +func (s *Service) deleteAppleDoubleSidecar(path string) error { + m := s.metaForPath(path) + if m == nil { + return nil + } + if err := m.DeleteMetadata(path); err != nil { + netlog.Debug("[AFP] warning: could not delete metadata for %s: %v", path, err) + } + return nil +} diff --git a/service/afp/metrics.go b/service/afp/metrics.go new file mode 100644 index 0000000..c2f119d --- /dev/null +++ b/service/afp/metrics.go @@ -0,0 +1,7 @@ +//go:build afp || all + +package afp + +import "github.com/pgodw/omnitalk/pkg/telemetry" + +var afpCommandsTotal = telemetry.NewCounter("omnitalk_afp_commands_total") diff --git a/service/afp/model_interfaces.go b/service/afp/model_interfaces.go index 722e6ca..f0eded8 100644 --- a/service/afp/model_interfaces.go +++ b/service/afp/model_interfaces.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp // RequestModel is implemented by decoded AFP request payload types. 
diff --git a/service/afp/operations.go b/service/afp/operations.go index 4717b46..8f75cb3 100644 --- a/service/afp/operations.go +++ b/service/afp/operations.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( diff --git a/service/afp/pascal_string.go b/service/afp/pascal_string.go index 7792786..33a0418 100644 --- a/service/afp/pascal_string.go +++ b/service/afp/pascal_string.go @@ -1,6 +1,8 @@ +//go:build afp || all + package afp -import "github.com/pgodw/omnitalk/go/appletalk" +import "github.com/pgodw/omnitalk/pkg/encoding" // ReadPascalString reads a length-prefixed MacRoman string at idx and returns UTF-8 text plus bytes consumed. func ReadPascalString(data []byte, idx int) (string, int) { @@ -11,12 +13,12 @@ func ReadPascalString(data []byte, idx int) (string, int) { if idx+1+length > len(data) { return "", 0 } - return appletalk.MacRomanToUTF8(data[idx+1 : idx+1+length]), length + 1 + return encoding.MacRomanToUTF8(data[idx+1 : idx+1+length]), length + 1 } // WritePascalString appends a UTF-8 string as a Pascal-style MacRoman string. func WritePascalString(dst []byte, value string) []byte { - encoded := appletalk.UTF8ToMacRoman(value) + encoded := encoding.UTF8ToMacRoman(value) if len(encoded) > 255 { encoded = encoded[:255] } diff --git a/service/afp/path_codec.go b/service/afp/path_codec.go index dc62146..eb090a0 100644 --- a/service/afp/path_codec.go +++ b/service/afp/path_codec.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -7,11 +9,11 @@ import ( "strings" "unicode/utf8" - "github.com/pgodw/omnitalk/go/appletalk" + "github.com/pgodw/omnitalk/pkg/encoding" ) // AFPOptions controls AFP filename/path translation behavior. -type AFPOptions struct { +type Options struct { // DecomposedFilenames enables host-reserved character escaping using 0xNN tokens. DecomposedFilenames bool // CNIDBackend selects the CNID backend by name. The default is "sqlite". 
@@ -28,21 +30,23 @@ type AFPOptions struct { ExtensionMap *ExtensionMap // ForkMetadataBackend overrides AppleDoubleMode with a concrete backend. ForkMetadataBackend ForkMetadataBackend + // PersistentVolumeIDs assigns stable volume IDs derived from volume names. + PersistentVolumeIDs bool } -func DefaultAFPOptions() AFPOptions { - return AFPOptions{DecomposedFilenames: true, CNIDBackend: "sqlite", DesktopBackend: "sqlite", AppleDoubleMode: defaultAppleDoubleMode} +func DefaultOptions() Options { + return Options{DecomposedFilenames: true, CNIDBackend: "sqlite", DesktopBackend: "sqlite", AppleDoubleMode: defaultAppleDoubleMode} } -func (s *AFPService) afpPathElementToHost(raw string) string { - decoded := appletalk.MacRomanToUTF8([]byte(raw)) +func (s *Service) afpPathElementToHost(raw string) string { + decoded := encoding.MacRomanToUTF8([]byte(raw)) if !s.options.DecomposedFilenames { return decoded } return encodeHostReservedChars(decoded) } -func (s *AFPService) hostNameToAFPBytes(hostName string, volID uint16) []byte { +func (s *Service) hostNameToAFPBytes(hostName string, volID uint16) []byte { name := hostName // In legacy AppleDouble mode the Icon\r file is stored on disk as "Icon_". // Before encoding back to AFP we need to restore the original Mac name. 
@@ -52,10 +56,10 @@ func (s *AFPService) hostNameToAFPBytes(hostName string, volID uint16) []byte { if s.options.DecomposedFilenames { name = decodeHostReservedTokens(name) } - return appletalk.UTF8ToMacRoman(name) + return encoding.UTF8ToMacRoman(name) } -func (s *AFPService) writeAFPName(buf *bytes.Buffer, hostName string, volID uint16) { +func (s *Service) writeAFPName(buf *bytes.Buffer, hostName string, volID uint16) { nameBytes := s.hostNameToAFPBytes(hostName, volID) if len(nameBytes) > 255 { nameBytes = nameBytes[:255] diff --git a/service/afp/path_codec_test.go b/service/afp/path_codec_test.go index ab8b215..348dc87 100644 --- a/service/afp/path_codec_test.go +++ b/service/afp/path_codec_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -6,7 +8,7 @@ import ( ) func TestWriteAFPName_EncodesToMacRoman(t *testing.T) { - s := NewAFPService("TestServer", nil, nil, nil) + s := NewService("TestServer", nil, nil, nil) var buf bytes.Buffer s.writeAFPName(&buf, "tm™", 0) @@ -18,7 +20,7 @@ func TestWriteAFPName_EncodesToMacRoman(t *testing.T) { } func TestHostTokenRoundTrip_WhenEnabled(t *testing.T) { - s := NewAFPService("TestServer", nil, nil, nil, AFPOptions{DecomposedFilenames: true}) + s := NewService("TestServer", nil, nil, nil, Options{DecomposedFilenames: true}) host := s.afpPathElementToHost("Hello/World") if host != "Hello0x2FWorld" { diff --git a/service/afp/paths.go b/service/afp/paths.go new file mode 100644 index 0000000..43a95c8 --- /dev/null +++ b/service/afp/paths.go @@ -0,0 +1,137 @@ +//go:build afp || all + +package afp + +import ( + "github.com/pgodw/omnitalk/netlog" + "path/filepath" + "strings" +) + +// CNID-backed path/DID resolution and AFP path-string parsing. The +// helpers here translate between AFP pathnames (null-separated, with +// consecutive nulls ascending the tree) and host filesystem paths, +// and between Catalog Node IDs and the path strings they index. 
+ +func (s *Service) cnidStore(volumeID uint16) (CNIDStore, bool) { + store, ok := s.cnidStores[volumeID] + return store, ok +} + +func (s *Service) getPathDID(volumeID uint16, path string) uint32 { + store, ok := s.cnidStore(volumeID) + if !ok { + return CNIDInvalid + } + return store.Ensure(path) +} + +func (s *Service) getDIDPath(volumeID uint16, did uint32) (string, bool) { + store, ok := s.cnidStore(volumeID) + if !ok { + return "", false + } + return store.Path(did) +} + +func (s *Service) resolveDIDPath(volumeID uint16, did uint32) (string, bool) { + if did == CNIDInvalid { + return "", false + } + return s.getDIDPath(volumeID, did) +} + +func (s *Service) rebindDIDSubtree(volumeID uint16, oldPath, newPath string) { + store, ok := s.cnidStore(volumeID) + if !ok { + return + } + store.Rebind(oldPath, newPath) +} + +func (s *Service) removeDIDSubtree(volumeID uint16, path string) { + store, ok := s.cnidStore(volumeID) + if !ok { + return + } + store.Remove(path) +} + +func (s *Service) resolvePath(parentPath, name string, pathType uint8) (string, int32) { + if pathType == 1 { + // Short names are not supported. + return "", ErrObjectNotFound + } + + // AFP pathnames are separated by null bytes (\x00). + // A single leading null byte is ignored. + if len(name) > 0 && name[0] == '\x00' { + name = name[1:] + } + + // A pathname string is composed of CNode names separated by null bytes. + // Consecutive null bytes ascend the directory tree: + // Two consecutive null bytes ascend one level. + // Three consecutive null bytes ascend two levels, etc. + elements := strings.Split(name, "\x00") + currentPath := parentPath + + for i := 0; i < len(elements); i++ { + el := elements[i] + if el == "" { + // Empty element means a null byte following another null byte (or a leading/trailing one). + // If it's the last element, it represents a trailing null byte which we can ignore. 
+ if i == len(elements)-1 { + continue + } + // Each consecutive null byte (after the first separator) means ascending one level. + // "To ascend one level... two consecutive null bytes should follow the offspring CNode name." + // If we see an empty string here, it corresponds to ascending. + currentPath = filepath.Dir(currentPath) + } else { + hostEl := s.afpPathElementToHost(el) + if hostEl == ".." { + return "", ErrAccessDenied + } + if !s.options.DecomposedFilenames && hasHostReservedChar(hostEl) { + return "", ErrAccessDenied + } + currentPath = s.canonicalizePath(filepath.Join(currentPath, hostEl)) + } + } + + fullPath := filepath.Clean(currentPath) + + for _, vol := range s.Volumes { + rel, err := filepath.Rel(vol.Config.Path, fullPath) + if err == nil && !strings.HasPrefix(rel, "..") { + return fullPath, NoErr + } + } + return "", ErrAccessDenied +} + +func (s *Service) resolveSetPath(volumeID uint16, dirID uint32, path string, pathType uint8) (string, int32) { + parentPath, ok := s.resolveDIDPath(volumeID, dirID) + if !ok && dirID != 0 { + return "", ErrObjectNotFound + } else if !ok { + parentPath, _ = s.resolveDIDPath(volumeID, CNIDRoot) + } + if path == "" { + return parentPath, NoErr + } + return s.resolvePath(parentPath, path, pathType) +} + +func (s *Service) applyFinderInfo(bitmap uint16, finderInfo [32]byte, targetPath string, volID uint16) { + if bitmap&FileBitmapFinderInfo != 0 { + m := s.metaFor(volID) + if m == nil { + return + } + if err := m.WriteFinderInfo(targetPath, finderInfo); err != nil { + netlog.Debug("[AFP] writeFinderInfo %q: %v", targetPath, err) + } + } +} diff --git a/service/afp/resolve_path_test.go b/service/afp/resolve_path_test.go index 547d7d3..42da211 100644 --- a/service/afp/resolve_path_test.go +++ b/service/afp/resolve_path_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -6,7 +8,7 @@ import ( ) func TestAFPService_resolvePath(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s 
:= NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/volumes/share"}, }, nil, nil) @@ -114,9 +116,9 @@ func TestAFPService_resolvePath(t *testing.T) { } func TestAFPService_resolvePath_ReservedCharsDisabled(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/volumes/share"}, - }, nil, nil, AFPOptions{DecomposedFilenames: false}) + }, nil, nil, Options{DecomposedFilenames: false}) gotPath, gotCode := s.resolvePath("/volumes/share", "docs/file.txt", 2) if gotCode != ErrAccessDenied { diff --git a/service/afp/resource_fork.go b/service/afp/resource_fork.go index 1ebe7ed..6818aa0 100644 --- a/service/afp/resource_fork.go +++ b/service/afp/resource_fork.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp // Classic Mac OS resource-fork parsing. Used by the AFP Desktop database diff --git a/service/afp/root_volume_name_test.go b/service/afp/root_volume_name_test.go index bd4857b..b5a95c5 100644 --- a/service/afp/root_volume_name_test.go +++ b/service/afp/root_volume_name_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -36,7 +38,7 @@ func TestHandleGetDirParms_RootUsesVolumeName(t *testing.T) { t.Fatalf("mkdir backing dir: %v", err) } - s := NewAFPService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetDirParms(&FPGetDirParmsReq{ VolumeID: 1, @@ -65,7 +67,7 @@ func TestHandleGetDirParms_ReadOnlyVolumeAccessRights(t *testing.T) { t.Fatalf("mkdir backing dir: %v", err) } - s := NewAFPService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir, ReadOnly: true}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir, ReadOnly: true}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetDirParms(&FPGetDirParmsReq{ VolumeID: 
1, @@ -94,7 +96,7 @@ func TestHandleGetDirParms_ReadOnlyVolumeAttributesDoNotUseWriteInhibitBit(t *te t.Fatalf("mkdir backing dir: %v", err) } - s := NewAFPService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir, ReadOnly: true}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "foo", Path: backingDir, ReadOnly: true}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetDirParms(&FPGetDirParmsReq{ VolumeID: 1, diff --git a/service/afp/server.go b/service/afp/server.go index afab5d1..ec2832f 100644 --- a/service/afp/server.go +++ b/service/afp/server.go @@ -1,3 +1,5 @@ +//go:build afp || all + /* Package afp implements the AppleTalk Filing Protocol (AFP) 2.x. @@ -10,262 +12,159 @@ https://dev.os9.ca/techpubs/mac/Networking/Networking-223.html package afp import ( - "bytes" - "encoding/binary" + "context" + "errors" "fmt" - "io/fs" - "log" - "os" - "path/filepath" - "runtime/debug" - "strings" "sync" - "time" - - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" -) - -// AFP Commands. -// Inside Macintosh: Networking. -const ( - FPByteRangeLock = 1 // lock byte ranges in an open fork. - FPCloseVol = 2 // notify server that a workstation no longer needs a volume. - FPCloseDir = 3 // close a directory on a variable Directory ID volume. - FPCloseFork = 4 // close an open fork. - FPCopyFile = 5 // copy a file from one server volume to another. - FPCreateDir = 6 // create a new directory. - FPCreateFile = 7 // create a new file. - FPDelete = 8 // delete a file or empty directory. - FPEnumerate = 9 // list files and directories within a directory. - FPFlush = 10 // flush data associated with a volume to disk. - FPFlushFork = 11 // write an open fork's internal buffers to disk. - FPGetDirParms = 12 - FPGetFileParms = 13 - FPGetForkParms = 14 // read an open fork's parameters. 
- FPGetSrvrInfo = 15 // get server information (name, version strings, UAMs, flags) without opening a session. - FPGetSrvrParms = 16 // get list of server volumes after a session is established. - FPGetVolParms = 17 // get parameters for a given volume. - FPLogin = 18 // authenticate user and establish a session. - FPLoginCont = 19 // continue multi-step user authentication process. - FPLogout = 20 // terminate an AFP session. - FPMapID = 21 // map user or group ID to the corresponding name. - FPMapName = 22 // map user or group name to the corresponding ID. - FPMoveAndRename = 23 // move and optionally rename a file or directory to a different parent directory. - FPOpenVol = 24 // request access to a volume, optionally providing a password. - FPOpenDir = 25 // open a directory on a variable Directory ID volume to retrieve its Directory ID. - FPOpenFork = 26 // open a data or resource fork of an existing file. - FPGetSrvrMsg = 38 - FPRead = 27 // read data from an open fork. - FPRename = 28 // rename a file or directory. - FPSetDirParms = 29 // change parameters of a specified directory. - FPSetFileParms = 30 // change parameters of a specified file. - FPSetForkParms = 31 // change parameters of an open fork. - FPSetVolParms = 32 // change parameters of a specified volume. - FPWrite = 33 // write data to an open fork. - FPGetFileDirParms = 34 // get parameters associated with a given file or directory. - FPSetFileDirParms = 35 // set parameters common to both files and directories. - FPChangePassword = 36 // change a user's password. - FPGetUserInfo = 37 // retrieve information about a user (AFP 2.0+). - - // AFP 2.2 additions. - FPExchangeFiles = 42 - - // AFP 2.1 catalogued search. - FPCatSearch = 43 - // AFP 2.0+ Desktop Database commands (Inside Macintosh: Networking §C). - // Finder uses these to store/retrieve icons, application mappings, and comments. - FPOpenDT = 48 // open the Desktop database for access. 
- FPCloseDT = 49 // close access to the Desktop database. - FPGetIcon = 51 // retrieve a specific icon bitmap from the Desktop database. - FPGetIconInfo = 52 // get description or determine set of icons for an application. - FPAddAPPL = 53 // register an application mapping (APPL) in the Desktop database. - FPRemoveAPPL = 54 // remove an application mapping from the Desktop database. - FPGetAPPL = 55 // get an application mapping from the Desktop database. - FPAddComment = 56 // add or replace a Finder comment for a file or directory. - FPRemoveComment = 57 // remove a Finder comment for a file or directory. - FPGetComment = 58 // retrieve a Finder comment for a file or directory. - FPAddIcon = 192 // add a new icon bitmap to the Desktop database. (special: maps to ASPUserWrite) + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/protocol/ddp" + "github.com/pgodw/omnitalk/service" ) -// forkHandle tracks an open fork (data or resource). -type forkHandle struct { - file File // nil for an empty resource fork - isRsrc bool - rsrcOff int64 // offset within the AppleDouble file where resource data starts - rsrcLen int64 // current length of resource fork data - rsrcLenFieldAt int64 // file offset of the ResourceFork entry's length field in the AppleDouble header - filePath string // absolute path of the file whose fork is open - volID uint16 // volume this fork belongs to -} - -type byteRangeLock struct { - lockKey string - ownerFork uint16 - start int64 - length int64 // -1 means open-ended (to EOF) -} - -const defaultMaxByteRangeLocks = 4096 - -// AFPService implements AppleTalk Filing Protocol. -type AFPService struct { +// Service implements AppleTalk Filing Protocol. +type Service struct { ServerName string + + // Volume registry. Populated once by installVolumes during NewService and + // read-only thereafter — no runtime call path adds, removes, or mutates + // these maps, so they need no synchronisation. 
Volumes []Volume fs FileSystem + volumeFS map[uint16]FileSystem meta ForkMetadataBackend // global override when ForkMetadataBackend is injected via options metas map[uint16]ForkMetadataBackend // per-volume backends (keyed by Volume.ID) - mu sync.RWMutex - options AFPOptions cnidStores map[uint16]CNIDStore - desktopDB DesktopDBBackend - forks map[uint16]*forkHandle - nextFork uint16 - byteLocks []byteRangeLock - maxLocks int - users map[string]string // map[username]password - nextSRefNum uint16 + options Options + desktopDB DesktopDBBackend + forks forkState + maxReadSize int // transport quantum limit; 0 = unlimited - // volumeBackupDate stores AFP "backup date" (ADouble-style seconds since 1904) - // per volume, as set by FPSetVolParms (AFP 2.x §5.1.32). - volumeBackupDate map[uint16]uint32 + sessions sessionState + + // FPSetVolParms-supplied per-volume backup dates (AFP 2.x §5.1.32). Only + // runtime-mutable piece of volume state. + backupDates backupDates // Desktop database state — one DesktopDB per volume (persists across sessions). - desktopDBs map[uint16]DesktopDB - dtRefs map[uint16]uint16 // DTRefNum → volume ID - nextDTRef uint16 + desktop desktopState transports []Transport dumper service.PacketDumper + + stop chan struct{} + wg sync.WaitGroup } -func (s *AFPService) SetPacketDumper(dumper service.PacketDumper) { +func (s *Service) SetPacketDumper(dumper service.PacketDumper) { s.dumper = dumper } -func (s *AFPService) logPacket(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - if s.dumper != nil { - s.dumper.LogPacket(msg) +// applyMaxReadSize caps FPRead ReqCount to n bytes and propagates the same +// limit to any filesystem that supports range limiting (e.g. +// MacGardenFileSystem). Called from Start after each transport has resolved +// its quantum; n=0 leaves reads uncapped. 
+func (s *Service) applyMaxReadSize(n int) { + s.maxReadSize = n + if n == 0 { + return + } + type rangeLimiter interface{ SetMaxRangeSize(int) } + if rl, ok := s.fs.(rangeLimiter); ok { + rl.SetMaxRangeSize(n) + } + for _, vfs := range s.volumeFS { + if rl, ok := vfs.(rangeLimiter); ok { + rl.SetMaxRangeSize(n) + } } } -func NewAFPService(serverName string, configs []VolumeConfig, fs FileSystem, transports []Transport, opts ...AFPOptions) *AFPService { - options := DefaultAFPOptions() +func NewService(serverName string, configs []VolumeConfig, fs FileSystem, transports []Transport, opts ...Options) *Service { + options := DefaultOptions() if len(opts) > 0 { options = opts[0] } - s := &AFPService{ + s := &Service{ ServerName: serverName, fs: fs, + stop: make(chan struct{}), + volumeFS: make(map[uint16]FileSystem), options: options, cnidStores: make(map[uint16]CNIDStore), desktopDB: resolveDesktopDBBackend(options), - forks: make(map[uint16]*forkHandle), - nextFork: 1, - byteLocks: make([]byteRangeLock, 0), - maxLocks: defaultMaxByteRangeLocks, - users: make(map[string]string), - nextSRefNum: 1, - - volumeBackupDate: make(map[uint16]uint32), - - desktopDBs: make(map[uint16]DesktopDB), - dtRefs: make(map[uint16]uint16), - nextDTRef: 1, + forks: newForkState(defaultMaxByteRangeLocks), + sessions: newSessionState(), + backupDates: newBackupDates(), + desktop: newDesktopState(), transports: transports, } - if options.ForkMetadataBackend != nil { - // Test injection: single global backend for all volumes. - s.meta = options.ForkMetadataBackend - } else { - // Normal path: build a per-volume backend using each volume's AppleDoubleMode - // (falling back to options.AppleDoubleMode if the volume does not specify one). 
- s.metas = make(map[uint16]ForkMetadataBackend) - } - - cnidBackend := resolveCNIDBackend(options) - for i, cfg := range configs { - volume := Volume{ - Config: cfg, - ID: uint16(i + 1), - } - s.Volumes = append(s.Volumes, volume) - store := cnidBackend.Open(volume) - store.EnsureReserved(filepath.Clean(cfg.Path), CNIDRoot) - s.cnidStores[volume.ID] = store - - if s.metas != nil && fs != nil { - mode := cfg.AppleDoubleMode - if mode == "" { - mode = options.AppleDoubleMode - } - s.metas[volume.ID] = NewAppleDoubleBackend(fs, mode, options.DecomposedFilenames) - } - } - go s.rebuildDesktopDBsIfConfigured() + s.initForkMetadata(options) + s.installVolumes(configs, fs) + s.spawnDesktopRebuild() return s } -// metaFor returns the ForkMetadataBackend for the given volume ID. -// If a per-volume backend is registered it is returned; otherwise the global -// injected backend (s.meta) is used. Returns nil when neither is available. -func (s *AFPService) metaFor(volID uint16) ForkMetadataBackend { - if s.metas != nil { - if m, ok := s.metas[volID]; ok { - return m - } - } - return s.meta -} -// metaForPath returns the ForkMetadataBackend for the volume whose root path -// is a prefix of path. Falls back to the global injected backend when no -// matching volume is found. -func (s *AFPService) metaForPath(path string) ForkMetadataBackend { - clean := filepath.Clean(path) - for _, vol := range s.Volumes { - rel, err := filepath.Rel(vol.Config.Path, clean) - if err == nil && !strings.HasPrefix(rel, "..") { - return s.metaFor(vol.ID) +// Start initializes all underlying transports and resolves the read-size cap +// from whichever transport advertises the smallest non-zero quantum. +func (s *Service) Start(ctx context.Context, router service.Router) error { + for _, t := range s.transports { + if err := t.Start(ctx, router); err != nil { + return err } } - return s.meta -} - -// Start initializes all underlying transports. 
-func (s *AFPService) Start(router service.Router) error { + cap := 0 for _, t := range s.transports { - if err := t.Start(router); err != nil { - return err + n := t.MaxReadSize() + if n <= 0 { + continue + } + if cap == 0 || n < cap { + cap = n } } + s.applyMaxReadSize(cap) return nil } // Stop shuts down all underlying transports. -func (s *AFPService) Stop() error { +func (s *Service) Stop() error { var errs []error + if s.stop != nil { + select { + case <-s.stop: + default: + close(s.stop) + } + } for _, t := range s.transports { if err := t.Stop(); err != nil { errs = append(errs, err) } } + s.wg.Wait() + type closer interface{ Close() error } + for _, fsys := range s.volumeFS { + if c, ok := fsys.(closer); ok { + if err := c.Close(); err != nil { + errs = append(errs, err) + } + } + } if len(errs) > 0 { - return fmt.Errorf("AFPService Stop errors: %v", errs) + return fmt.Errorf("afp: stop: %w", errors.Join(errs...)) } return nil } // Socket returns the AppleTalk socket number if any of the transports listen on one. // We return asp.ServerSocket (252) if we have a transport that needs it. -func (s *AFPService) Socket() uint8 { +func (s *Service) Socket() uint8 { // The router expects services that listen on a specific socket to return it here. // Since AFPService wraps transports, we return the well-known ASP socket (252). // TCP-only instances won't be called for AppleTalk routing anyway if they don't register NBP. @@ -273,1155 +172,15 @@ func (s *AFPService) Socket() uint8 { } // Inbound delegates inbound DDP packets to the underlying transports. 
-func (s *AFPService) Inbound(d appletalk.Datagram, p port.Port) { +func (s *Service) Inbound(d ddp.Datagram, p port.Port) { for _, t := range s.transports { t.Inbound(d, p) } } // GetStatus implements the CommandHandler interface -func (s *AFPService) GetStatus() []byte { +func (s *Service) GetStatus() []byte { return BuildServerInfo(s.ServerName) } -type Request interface { - Unmarshal(data []byte) error - String() string -} - -type Response interface { - Marshal() []byte - String() string -} - -func (s *AFPService) HandleCommand(data []byte) (resBytes []byte, errCode int32) { - defer func() { - if r := recover(); r != nil { - log.Printf("[AFP] PANIC in cmd=%d: %v\n%s", data[0], r, debug.Stack()) - resBytes = nil - errCode = ErrParamErr - } - }() - if len(data) == 0 { - return nil, ErrParamErr - } - - cmd := data[0] - - var req Request - var handler func(Request) (Response, int32) - - switch cmd { - case FPGetSrvrInfo: - req = &FPGetSrvrInfoReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetSrvrInfo(req.(*FPGetSrvrInfoReq)) - if err != nil { - return nil, ErrMiscErr - } - return res, NoErr - } - case FPGetSrvrParms: - req = &FPGetSrvrParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetSrvrParms(req.(*FPGetSrvrParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPLogin: - req = &FPLoginReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleLogin(req.(*FPLoginReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPLogout: - req = &FPLogoutReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleLogout(req.(*FPLogoutReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPOpenVol: - req = &FPOpenVolReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleOpenVol(req.(*FPOpenVolReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPGetVolParms: - req 
= &FPGetVolParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetVolParms(req.(*FPGetVolParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPOpenDir: - req = &FPOpenDirReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleOpenDir(req.(*FPOpenDirReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCloseVol: - req = &FPCloseVolReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleCloseVol(req.(*FPCloseVolReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCloseDir: - req = &FPCloseDirReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleCloseDir(req.(*FPCloseDirReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCloseFork: - req = &FPCloseForkReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleCloseFork(req.(*FPCloseForkReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPFlush: - req = &FPFlushReq{} - handler = func(req Request) (Response, int32) { - return s.handleFlush(req.(*FPFlushReq)) - } - case FPFlushFork: - req = &FPFlushForkReq{} - handler = func(req Request) (Response, int32) { - return s.handleFlushFork(req.(*FPFlushForkReq)) - } - case FPEnumerate: - req = &FPEnumerateReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleEnumerate(req.(*FPEnumerateReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPGetFileDirParms: - req = &FPGetFileDirParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetFileDirParms(req.(*FPGetFileDirParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPOpenFork: - req = &FPOpenForkReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleOpenFork(req.(*FPOpenForkReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPRead: - req = 
&FPReadReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleRead(req.(*FPReadReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPWrite: - req = &FPWriteReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleWrite(req.(*FPWriteReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCreateFile: - req = &FPCreateFileReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleCreateFile(req.(*FPCreateFileReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCreateDir: - req = &FPCreateDirReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleCreateDir(req.(*FPCreateDirReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPDelete: - req = &FPDeleteReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleDelete(req.(*FPDeleteReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPRename: - req = &FPRenameReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleRename(req.(*FPRenameReq)) - if res == nil { - return nil, err - } - return res, err - } - // --- Commands with minimal compatibility implementations --- - - case FPByteRangeLock: // byte-range locking (Finder uses during copy) - req = &FPByteRangeLockReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleByteRangeLock(req.(*FPByteRangeLockReq)) - if res == nil { - return nil, err - } - return res, err - } - case FPCopyFile: - req = &FPCopyFileReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleCopyFile(req.(*FPCopyFileReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetDirParms: - req = &FPGetDirParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetDirParms(req.(*FPGetDirParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case 
FPGetFileParms: - req = &FPGetFileParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetFileParms(req.(*FPGetFileParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPGetForkParms: - req = &FPGetForkParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleGetForkParms(req.(*FPGetForkParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPLoginCont: // TODO: Implement second-phase UAM login (AFP 2.x §5.1.19) - req = &FPLoginContReq{} - handler = func(req Request) (Response, int32) { - log.Printf("[AFP] TODO: Implement FPLoginCont called — not implemented") - return nil, ErrCallNotSupported - } - - case FPMapID: - req = &FPMapIDReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleMapID(req.(*FPMapIDReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPMapName: - req = &FPMapNameReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleMapName(req.(*FPMapNameReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPMoveAndRename: - req = &FPMoveAndRenameReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleMoveAndRename(req.(*FPMoveAndRenameReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPSetDirParms: - req = &FPSetDirParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleSetDirParms(req.(*FPSetDirParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPSetFileParms: - req = &FPSetFileParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleSetFileParms(req.(*FPSetFileParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPSetForkParms: - req = &FPSetForkParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := 
s.handleSetForkParms(req.(*FPSetForkParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPSetVolParms: - req = &FPSetVolParmsReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleSetVolParms(req.(*FPSetVolParmsReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPSetFileDirParms: - req = &FPSetFileDirParmsReq{} - handler = func(req Request) (Response, int32) { - res, err := s.handleSetFileDirParms(req.(*FPSetFileDirParmsReq)) - if res == nil { - return nil, err - } - return res, err - } - - case FPExchangeFiles: - req = &FPExchangeFilesReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleExchangeFiles(req.(*FPExchangeFilesReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetSrvrMsg: - req = &FPGetSrvrMsgReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleGetSrvrMsg(req.(*FPGetSrvrMsgReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPChangePassword: // changing passwords is not supported - req = &FPUnsupportedReq{} - handler = func(req Request) (Response, int32) { - return nil, ErrCallNotSupported - } - - case FPGetUserInfo: // user info not supported; full permissions assumed - req = &FPUnsupportedReq{} - handler = func(req Request) (Response, int32) { - return nil, ErrCallNotSupported - } - - case FPCatSearch: // TODO: Implement catalogued volume search (AFP 2.1) - req = &FPCatSearchReq{} - handler = func(req Request) (Response, int32) { - log.Printf("[AFP] TODO: Implement FPCatSearch called — not implemented") - return nil, ErrCallNotSupported - } - - // --- TODO Desktop Database commands (AFP 2.1+) --- - // Finder uses the Desktop DB to store icons, application mappings (APPL tags), - // and Get Info comments. Without this, icons fall back to generic defaults. 
- - case FPOpenDT: // open Desktop Database — create .AppleDesktop dir and .desktop.db cache - req = &FPOpenDTReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleOpenDT(req.(*FPOpenDTReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPCloseDT: // close Desktop Database — invalidate DTRefNum - req = &FPCloseDTReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleCloseDT(req.(*FPCloseDTReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetIcon: // retrieve icon bitmap from Desktop database - req = &FPGetIconReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleGetIcon(req.(*FPGetIconReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetIconInfo: // retrieve icon metadata from Desktop database - req = &FPGetIconInfoReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleGetIconInfo(req.(*FPGetIconInfoReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPAddIcon: // add icon bitmap to Desktop database - req = &FPAddIconReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleAddIcon(req.(*FPAddIconReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPAddAPPL: // register APPL mapping in Desktop database - req = &FPAddAPPLReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleAddAPPL(req.(*FPAddAPPLReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPRemoveAPPL: // remove APPL mapping from Desktop database - req = &FPRemoveAPPLReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleRemoveAPPL(req.(*FPRemoveAPPLReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetAPPL: // retrieve APPL mapping from Desktop database - req = &FPGetAPPLReq{} - 
handler = func(req Request) (Response, int32) { - res, errCode := s.handleGetAPPL(req.(*FPGetAPPLReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPAddComment: // add Finder comment to Desktop database - req = &FPAddCommentReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleAddComment(req.(*FPAddCommentReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPRemoveComment: // remove Finder comment from Desktop database - req = &FPRemoveCommentReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleRemoveComment(req.(*FPRemoveCommentReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - case FPGetComment: // retrieve Finder comment from Desktop database - req = &FPGetCommentReq{} - handler = func(req Request) (Response, int32) { - res, errCode := s.handleGetComment(req.(*FPGetCommentReq)) - if res == nil { - return nil, errCode - } - return res, errCode - } - - default: - log.Printf("[AFP] unknown command %d", cmd) - return nil, ErrCallNotSupported - } - - cmdData := data - if cmd == FPLogin { - // FPLoginReq.Unmarshal expects data without the command byte. 
- cmdData = data[1:] - } - - if err := req.Unmarshal(cmdData); err != nil { - log.Printf("[AFP] Error unmarshaling cmd %d: %v", cmd, err) - return nil, ErrParamErr - } - - s.logPacket("[AFP] → %s", req.String()) - s.logResolvedPaths(req) - - var res Response - res, errCode = handler(req) - - if res != nil { - s.logPacket("[AFP] ← %s (err=%d)", res.String(), errCode) - resBytes = res.Marshal() - } else if errCode != NoErr { - s.logPacket("[AFP] ← cmd=%d err=%d", cmd, errCode) - } - - return resBytes, errCode -} - -func (s *AFPService) logResolvedPaths(req Request) { - switch r := req.(type) { - case *FPOpenDirReq: - s.logResolvedPath("FPOpenDir", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPEnumerateReq: - s.logResolvedPath("FPEnumerate", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPGetFileDirParmsReq: - s.logResolvedPath("FPGetFileDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPGetDirParmsReq: - s.logResolvedPath("FPGetDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPGetFileParmsReq: - s.logResolvedPath("FPGetFileParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPOpenForkReq: - s.logResolvedPath("FPOpenFork", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPCreateFileReq: - s.logResolvedPath("FPCreateFile", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPCreateDirReq: - s.logResolvedPath("FPCreateDir", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPDeleteReq: - s.logResolvedPath("FPDelete", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPSetDirParmsReq: - s.logResolvedPath("FPSetDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPSetFileParmsReq: - s.logResolvedPath("FPSetFileParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPSetFileDirParmsReq: - s.logResolvedPath("FPSetFileDirParms", r.VolumeID, r.DirID, r.PathType, r.Path) - case *FPRenameReq: - s.logResolvedPath("FPRename old", r.VolumeID, r.DirID, r.PathType, r.Name) - s.logResolvedPath("FPRename new", r.VolumeID, r.DirID, 
r.NewPathType, r.NewName) - case *FPMoveAndRenameReq: - s.logResolvedPath("FPMoveAndRename src", r.VolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) - s.logResolvedPath("FPMoveAndRename dstDir", r.VolumeID, r.DstDirID, r.DstPathType, r.DstDirName) - case *FPExchangeFilesReq: - s.logResolvedPath("FPExchangeFiles src", r.VolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) - s.logResolvedPath("FPExchangeFiles dst", r.VolumeID, r.DstDirID, r.DstPathType, r.DstName) - case *FPCopyFileReq: - s.logResolvedPath("FPCopyFile src", r.SrcVolumeID, r.SrcDirID, r.SrcPathType, r.SrcName) - s.logResolvedPath("FPCopyFile dstDir", r.DstVolumeID, r.DstDirID, r.DstPathType, r.DstDirName) - case *FPAddAPPLReq: - s.logResolvedPathFromDTRef("FPAddAPPL", r.DTRefNum, r.DirID, r.PathType, r.Path) - case *FPRemoveAPPLReq: - s.logResolvedPathFromDTRef("FPRemoveAPPL", r.DTRefNum, r.DirID, r.PathType, r.Path) - case *FPAddCommentReq: - s.logResolvedPathFromDTRef("FPAddComment", r.DTRefNum, r.DirID, r.PathType, r.Path) - case *FPRemoveCommentReq: - s.logResolvedPathFromDTRef("FPRemoveComment", r.DTRefNum, r.DirID, r.PathType, r.Path) - case *FPGetCommentReq: - s.logResolvedPathFromDTRef("FPGetComment", r.DTRefNum, r.DirID, r.PathType, r.Path) - } -} - -func (s *AFPService) logResolvedPath(op string, volumeID uint16, dirID uint32, pathType uint8, rawPath string) { - resolved, errCode := s.resolveVolumePath(volumeID, dirID, rawPath, pathType) - if errCode == NoErr { - log.Printf("[AFP][Path] %s vol=%d dirID=%d pathType=%d raw=%q resolved=%q", op, volumeID, dirID, pathType, rawPath, resolved) - return - } - log.Printf("[AFP][Path] %s vol=%d dirID=%d pathType=%d raw=%q unresolved err=%d", op, volumeID, dirID, pathType, rawPath, errCode) -} - -func (s *AFPService) logResolvedPathFromDTRef(op string, dtRefNum uint16, dirID uint32, pathType uint8, rawPath string) { - s.mu.RLock() - volID, ok := s.dtRefs[dtRefNum] - s.mu.RUnlock() - if !ok { - log.Printf("[AFP][Path] %s dtRef=%d dirID=%d pathType=%d raw=%q 
unresolved err=%d", op, dtRefNum, dirID, pathType, rawPath, ErrParamErr) - return - } - s.logResolvedPath(op, volID, dirID, pathType, rawPath) -} - -// statPathWithAppleDoubleFallback stats path and, if missing, retries with a -// "._" prefixed basename to support orphan AppleDouble files. -func (s *AFPService) statPathWithAppleDoubleFallback(path string) (string, fs.FileInfo, error) { - m := s.metaForPath(path) - if m == nil { - return path, nil, os.ErrNotExist - } - return m.StatWithMetadataFallback(path) -} - -// iconFileNameFor returns the host filesystem name for the Mac "Icon\r" file -// for the given volume, respecting its AppleDouble mode and decomposed filename settings. -func (s *AFPService) iconFileNameFor(volID uint16) string { - if m := s.metaFor(volID); m != nil { - return m.IconFileName() - } - if s.options.DecomposedFilenames { - return "Icon0x0D" - } - return "Icon\r" -} - -// canonicalizePath remaps any Icon\r variant in path to the canonical host -// name for the configured backend (e.g. Icon0x0D→Icon_ in legacy mode). -// This is applied during path resolution so both reads and writes use the -// correct on-disk name without duplicating the alias logic in every handler. -func (s *AFPService) canonicalizePath(path string) string { - m := s.metaForPath(path) - if m == nil { - return path - } - base := filepath.Base(path) - canonical := m.IconFileName() - if isIconFile(base) && base != canonical { - return filepath.Join(filepath.Dir(path), canonical) - } - return path -} - -// alwaysHiddenNames lists directory and file names that are always hidden from -// AFP clients regardless of volume backend or AppleDouble mode. Names are -// matched case-insensitively. 
-var alwaysHiddenNames = []string{ - ".appledesktop", - ".appledouble", -} - -func (s *AFPService) isMetadataArtifact(name string, isDir bool, volID uint16) bool { - if !isDir && strings.EqualFold(name, afpSQLiteFilename) { - return true - } - for _, hidden := range alwaysHiddenNames { - if strings.EqualFold(name, hidden) { - return true - } - } - if m := s.metaFor(volID); m != nil { - return m.IsMetadataArtifact(name, isDir) - } - return strings.HasPrefix(name, "._") -} - -// moveAppleDoubleSidecar renames an AppleDouble sidecar (._name) alongside a -// primary file rename/move. This is best-effort: missing sidecars are silently -// ignored, and unexpected errors are logged but not returned to the caller so -// that a sidecar failure never causes the already-completed primary operation -// to report an error to the client. -func (s *AFPService) moveAppleDoubleSidecar(oldPath, newPath string) error { - m := s.metaForPath(oldPath) - if m == nil { - return nil - } - if err := m.MoveMetadata(oldPath, newPath); err != nil { - log.Printf("[AFP] warning: could not move metadata %s → %s: %v", oldPath, newPath, err) - } - return nil -} - -// deleteAppleDoubleSidecar removes a file's AppleDouble sidecar. This is -// best-effort: missing sidecars are silently ignored, and unexpected errors -// are logged but not returned to the caller. -func (s *AFPService) deleteAppleDoubleSidecar(path string) error { - m := s.metaForPath(path) - if m == nil { - return nil - } - if err := m.DeleteMetadata(path); err != nil { - log.Printf("[AFP] warning: could not delete metadata for %s: %v", path, err) - } - return nil -} - -// calcVolParamsSize returns the total byte size of all fixed fields (including -// variable-name offset pointers) for a volume parameter block with the given bitmap. 
-func calcVolParamsSize(bitmap uint16) int { - size := 0 - if bitmap&VolBitmapAttributes != 0 { - size += 2 - } - if bitmap&VolBitmapSignature != 0 { - size += 2 - } - if bitmap&VolBitmapCreateDate != 0 { - size += 4 - } - if bitmap&VolBitmapModDate != 0 { - size += 4 - } - if bitmap&VolBitmapBackupDate != 0 { - size += 4 - } - if bitmap&VolBitmapVolID != 0 { - size += 2 - } - if bitmap&VolBitmapBytesFree != 0 { - size += 4 - } - if bitmap&VolBitmapBytesTotal != 0 { - size += 4 - } - if bitmap&VolBitmapName != 0 { - size += 2 // offset pointer - } - if bitmap&VolBitmapExtBytesFree != 0 { - size += 8 - } - if bitmap&VolBitmapExtBytesTotal != 0 { - size += 8 - } - if bitmap&VolBitmapBlockSize != 0 { - size += 4 - } - return size -} - -// calcDirParamsSize returns the total byte size of all fixed fields (including -// variable-name offset pointers) for a directory parameter block with the given bitmap. -func calcDirParamsSize(bitmap uint16) int { - size := 0 - if bitmap&DirBitmapAttributes != 0 { - size += 2 - } - if bitmap&DirBitmapParentDID != 0 { - size += 4 - } - if bitmap&DirBitmapCreateDate != 0 { - size += 4 - } - if bitmap&DirBitmapModDate != 0 { - size += 4 - } - if bitmap&DirBitmapBackupDate != 0 { - size += 4 - } - if bitmap&DirBitmapFinderInfo != 0 { - size += 32 - } - if bitmap&DirBitmapLongName != 0 { - size += 2 // offset pointer - } - if bitmap&DirBitmapShortName != 0 { - size += 2 // offset pointer - } - if bitmap&DirBitmapDirID != 0 { - size += 4 - } - if bitmap&DirBitmapOffspringCount != 0 { - size += 2 - } - if bitmap&DirBitmapOwnerID != 0 { - size += 4 - } - if bitmap&DirBitmapGroupID != 0 { - size += 4 - } - if bitmap&DirBitmapAccessRights != 0 { - size += 4 - } - if bitmap&DirBitmapProDOSInfo != 0 { - size += 6 - } - return size -} - -// calcFileParamsSize returns the total byte size of all fixed fields (including -// variable-name offset pointers) for a file parameter block with the given bitmap. 
-func calcFileParamsSize(bitmap uint16) int { - size := 0 - if bitmap&FileBitmapAttributes != 0 { - size += 2 - } - if bitmap&FileBitmapParentDID != 0 { - size += 4 - } - if bitmap&FileBitmapCreateDate != 0 { - size += 4 - } - if bitmap&FileBitmapModDate != 0 { - size += 4 - } - if bitmap&FileBitmapBackupDate != 0 { - size += 4 - } - if bitmap&FileBitmapFinderInfo != 0 { - size += 32 - } - if bitmap&FileBitmapLongName != 0 { - size += 2 // offset pointer - } - if bitmap&FileBitmapShortName != 0 { - size += 2 // offset pointer - } - if bitmap&FileBitmapFileNum != 0 { - size += 4 - } - if bitmap&FileBitmapDataForkLen != 0 { - size += 4 - } - if bitmap&FileBitmapRsrcForkLen != 0 { - size += 4 - } - if bitmap&FileBitmapProDOSInfo != 0 { - size += 6 - } - return size -} - -func (s *AFPService) packFileInfo(buf *bytes.Buffer, volumeID uint16, bitmap uint16, parentPath, name string, info fs.FileInfo, isDir bool) { - var varBuf bytes.Buffer - fullPath := filepath.Join(parentPath, name) - name = s.catalogNameForPath(volumeID, fullPath, name) - - metadata := ForkMetadata{} - if m := s.metaFor(volumeID); m != nil { - if md, err := m.ReadForkMetadata(fullPath); err == nil { - metadata = md - } - } - if !isDir && !hasFinderTypeCreator(metadata.FinderInfo) && s.options.ExtensionMap != nil { - if mapping, ok := s.options.ExtensionMap.Lookup(fullPath); ok { - metadata.FinderInfo = applyExtensionMapping(metadata.FinderInfo, mapping) - } - } - - // Opportunistically ingest icons from the file's AppleDouble sidecar - // while we already have the metadata in hand. This populates the Desktop - // database naturally as Finder browses directories. 
- if EnableAppleDoubleIconFallback && !isDir { - s.IngestAppleDoubleIcons(volumeID, fullPath) - } - - if isDir { - fixedSize := calcDirParamsSize(bitmap) - - if bitmap&DirBitmapAttributes != 0 { - binary.Write(buf, binary.BigEndian, uint16(0)) - } - if bitmap&DirBitmapParentDID != 0 { - // The root directory (DID=2) has a logical parent DID of 1. - var pdir uint32 - thisDID := s.getPathDID(volumeID, fullPath) - if thisDID == CNIDRoot { - pdir = CNIDParentOfRoot - } else { - pdir = s.getPathDID(volumeID, parentPath) - } - binary.Write(buf, binary.BigEndian, pdir) - } - if bitmap&DirBitmapCreateDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(toAFPTime(info.ModTime()))) - } - if bitmap&DirBitmapModDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(toAFPTime(info.ModTime()))) - } - if bitmap&DirBitmapBackupDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(0)) - } - if bitmap&DirBitmapFinderInfo != 0 { - buf.Write(metadata.FinderInfo[:]) - } - if bitmap&DirBitmapLongName != 0 { - offset := uint16(fixedSize + varBuf.Len()) - binary.Write(buf, binary.BigEndian, offset) - s.writeAFPName(&varBuf, name, volumeID) - } - if bitmap&DirBitmapShortName != 0 { - offset := uint16(fixedSize + varBuf.Len()) - binary.Write(buf, binary.BigEndian, offset) - s.writeAFPName(&varBuf, name, volumeID) - } - if bitmap&DirBitmapDirID != 0 { - did := s.getPathDID(volumeID, fullPath) - binary.Write(buf, binary.BigEndian, did) - } - if bitmap&DirBitmapOffspringCount != 0 { - count := uint16(0) - if entries, err := s.fs.ReadDir(fullPath); err == nil { - for _, e := range entries { - if !s.isMetadataArtifact(e.Name(), e.IsDir(), volumeID) { - count++ - } - } - } - binary.Write(buf, binary.BigEndian, count) - } - if bitmap&DirBitmapOwnerID != 0 { - binary.Write(buf, binary.BigEndian, uint32(0)) - } - if bitmap&DirBitmapGroupID != 0 { - binary.Write(buf, binary.BigEndian, uint32(0)) - } - if bitmap&DirBitmapAccessRights != 0 { - rights := uint32(0x87070707) - if 
s.volumeIsReadOnly(volumeID) { - // Read-only volumes should advertise read+search rights, not write. - rights = 0x87030303 - } - binary.Write(buf, binary.BigEndian, rights) - } - if bitmap&DirBitmapProDOSInfo != 0 { - buf.Write(make([]byte, 6)) - } - } else { - fixedSize := calcFileParamsSize(bitmap) - - if bitmap&FileBitmapAttributes != 0 { - attr := uint16(0) - if s.volumeIsReadOnly(volumeID) { - attr |= FileAttrWriteInhibit - } - binary.Write(buf, binary.BigEndian, attr) - } - if bitmap&FileBitmapParentDID != 0 { - pdir := s.getPathDID(volumeID, parentPath) - binary.Write(buf, binary.BigEndian, pdir) - } - if bitmap&FileBitmapCreateDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(toAFPTime(info.ModTime()))) - } - if bitmap&FileBitmapModDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(toAFPTime(info.ModTime()))) - } - if bitmap&FileBitmapBackupDate != 0 { - binary.Write(buf, binary.BigEndian, uint32(0)) - } - if bitmap&FileBitmapFinderInfo != 0 { - buf.Write(metadata.FinderInfo[:]) - } - if bitmap&FileBitmapLongName != 0 { - offset := uint16(fixedSize + varBuf.Len()) - binary.Write(buf, binary.BigEndian, offset) - s.writeAFPName(&varBuf, name, volumeID) - } - if bitmap&FileBitmapShortName != 0 { - offset := uint16(fixedSize + varBuf.Len()) - binary.Write(buf, binary.BigEndian, offset) - s.writeAFPName(&varBuf, name, volumeID) - } - if bitmap&FileBitmapFileNum != 0 { - did := s.getPathDID(volumeID, fullPath) - binary.Write(buf, binary.BigEndian, did) - } - if bitmap&FileBitmapDataForkLen != 0 { - binary.Write(buf, binary.BigEndian, uint32(info.Size())) - } - if bitmap&FileBitmapRsrcForkLen != 0 { - binary.Write(buf, binary.BigEndian, uint32(metadata.ResourceForkLen)) - } - if bitmap&FileBitmapProDOSInfo != 0 { - buf.Write(make([]byte, 6)) - } - } - - buf.Write(varBuf.Bytes()) -} - -func (s *AFPService) catalogNameForPath(volumeID uint16, fullPath, fallbackName string) string { - cleanPath := filepath.Clean(fullPath) - for i := range s.Volumes { - 
vol := s.Volumes[i] - if vol.ID != volumeID { - continue - } - if cleanPath == filepath.Clean(vol.Config.Path) && vol.Config.Name != "" { - return vol.Config.Name - } - break - } - return fallbackName -} - -func toAFPTime(t time.Time) uint32 { - epoch := time.Date(1904, 1, 1, 0, 0, 0, 0, time.Local) - if t.Before(epoch) { - return 0 - } - secs := t.Sub(epoch).Seconds() - if secs > float64(^uint32(0)) { - return ^uint32(0) - } - return uint32(secs) -} - -func (s *AFPService) cnidStore(volumeID uint16) (CNIDStore, bool) { - s.mu.RLock() - defer s.mu.RUnlock() - store, ok := s.cnidStores[volumeID] - return store, ok -} - -func (s *AFPService) getPathDID(volumeID uint16, path string) uint32 { - store, ok := s.cnidStore(volumeID) - if !ok { - return CNIDInvalid - } - return store.Ensure(path) -} - -func (s *AFPService) getDIDPath(volumeID uint16, did uint32) (string, bool) { - store, ok := s.cnidStore(volumeID) - if !ok { - return "", false - } - return store.Path(did) -} - -func (s *AFPService) resolveDIDPath(volumeID uint16, did uint32) (string, bool) { - if did == CNIDInvalid { - return "", false - } - return s.getDIDPath(volumeID, did) -} - -func (s *AFPService) rebindDIDSubtree(volumeID uint16, oldPath, newPath string) { - store, ok := s.cnidStore(volumeID) - if !ok { - return - } - store.Rebind(oldPath, newPath) -} - -func (s *AFPService) removeDIDSubtree(volumeID uint16, path string) { - store, ok := s.cnidStore(volumeID) - if !ok { - return - } - store.Remove(path) -} - -func (s *AFPService) resolvePath(parentPath, name string, pathType uint8) (string, int32) { - if pathType == 1 { - // Short names are not supported. - return "", ErrObjectNotFound - } - - // AFP pathnames are separated by null bytes (\x00). - // A single leading null byte is ignored. - if len(name) > 0 && name[0] == '\x00' { - name = name[1:] - } - - // A pathname string is composed of CNode names separated by null bytes. 
- // Consecutive null bytes ascend the directory tree: - // Two consecutive null bytes ascend one level. - // Three consecutive null bytes ascend two levels, etc. - elements := strings.Split(name, "\x00") - currentPath := parentPath - - for i := 0; i < len(elements); i++ { - el := elements[i] - if el == "" { - // Empty element means a null byte following another null byte (or a leading/trailing one). - // If it's the last element, it represents a trailing null byte which we can ignore. - if i == len(elements)-1 { - continue - } - // Each consecutive null byte (after the first separator) means ascending one level. - // "To ascend one level... two consecutive null bytes should follow the offspring CNode name." - // If we see an empty string here, it corresponds to ascending. - currentPath = filepath.Dir(currentPath) - } else { - hostEl := s.afpPathElementToHost(el) - if hostEl == ".." { - return "", ErrAccessDenied - } - if !s.options.DecomposedFilenames && hasHostReservedChar(hostEl) { - return "", ErrAccessDenied - } - currentPath = s.canonicalizePath(filepath.Join(currentPath, hostEl)) - } - } - - fullPath := filepath.Clean(currentPath) - - s.mu.RLock() - defer s.mu.RUnlock() - for _, vol := range s.Volumes { - rel, err := filepath.Rel(vol.Config.Path, fullPath) - if err == nil && !strings.HasPrefix(rel, "..") { - return fullPath, NoErr - } - } - return "", ErrAccessDenied -} - -func (s *AFPService) resolveSetPath(volumeID uint16, dirID uint32, path string, pathType uint8) (string, int32) { - parentPath, ok := s.resolveDIDPath(volumeID, dirID) - if !ok && dirID != 0 { - return "", ErrObjectNotFound - } else if !ok { - parentPath, _ = s.resolveDIDPath(volumeID, CNIDRoot) - } - if path == "" { - return parentPath, NoErr - } - return s.resolvePath(parentPath, path, pathType) -} - -func (s *AFPService) applyFinderInfo(bitmap uint16, finderInfo [32]byte, targetPath string, volID uint16) { - if bitmap&FileBitmapFinderInfo != 0 { - m := s.metaFor(volID) - if m == nil { - 
return - } - if err := m.WriteFinderInfo(targetPath, finderInfo); err != nil { - log.Printf("[AFP] writeFinderInfo %q: %v", targetPath, err) - } - } -} - -func (s *AFPService) handleGetSrvrMsg(req *FPGetSrvrMsgReq) (*FPGetSrvrMsgRes, int32) { - return &FPGetSrvrMsgRes{ - MessageType: req.MessageType, - Bitmap: 0, - Message: "", - }, NoErr -} diff --git a/service/afp/server_calls.go b/service/afp/server_calls.go index daa022e..9d9a3d4 100644 --- a/service/afp/server_calls.go +++ b/service/afp/server_calls.go @@ -1,11 +1,13 @@ +//go:build afp || all + package afp import ( - "log" + "github.com/pgodw/omnitalk/netlog" "time" ) -func (s *AFPService) handleGetSrvrInfo(req *FPGetSrvrInfoReq) (*FPGetSrvrInfoRes, error) { +func (s *Service) handleGetSrvrInfo(req *FPGetSrvrInfoReq) (*FPGetSrvrInfoRes, error) { return &FPGetSrvrInfoRes{ MachineType: "Macintosh", AFPVersions: []string{Version20, Version21}, @@ -15,10 +17,7 @@ func (s *AFPService) handleGetSrvrInfo(req *FPGetSrvrInfoReq) (*FPGetSrvrInfoRes }, nil } -func (s *AFPService) handleGetSrvrParms(req *FPGetSrvrParmsReq) (*FPGetSrvrParmsRes, int32) { - s.mu.RLock() - defer s.mu.RUnlock() - +func (s *Service) handleGetSrvrParms(req *FPGetSrvrParmsReq) (*FPGetSrvrParmsRes, int32) { res := &FPGetSrvrParmsRes{ ServerTime: toAFPTime(time.Now()), Volumes: make([]VolInfo, len(s.Volumes)), @@ -38,49 +37,40 @@ func (s *AFPService) handleGetSrvrParms(req *FPGetSrvrParmsReq) (*FPGetSrvrParms return res, NoErr } -func (s *AFPService) handleLogin(req *FPLoginReq) (*FPLoginRes, int32) { - log.Printf("[AFP] Login attempt: Version=%q, UAM=%q", req.AFPVersion, req.UAM) +func (s *Service) handleLogin(req *FPLoginReq) (*FPLoginRes, int32) { + netlog.Debug("[AFP] Login attempt: Version=%q, UAM=%q", req.AFPVersion, req.UAM) if req.AFPVersion != Version20 && req.AFPVersion != Version21 { return &FPLoginRes{}, ErrBadVersNum } - s.mu.Lock() - defer s.mu.Unlock() - if req.UAM == UAMNoUserAuthent { // Nothing else required } else if req.UAM == 
UAMCleartxtPasswd { - log.Printf("[AFP] Cleartxt Passwrd for User=%q", req.Username) - expectedPw, exists := s.users[req.Username] - if !exists || expectedPw != req.Password { + netlog.Debug("[AFP] Cleartxt Passwrd for User=%q", req.Username) + if !s.sessions.checkPassword(req.Username, req.Password) { return &FPLoginRes{}, ErrUserNotAuth } } else { return &FPLoginRes{}, ErrBadUAM } - sRefNum := s.nextSRefNum - s.nextSRefNum++ - return &FPLoginRes{ - SRefNum: sRefNum, + SRefNum: s.sessions.allocSRef(), IDNumber: 0, }, NoErr } // AddUser adds a user to the AFP service for authentication. -func (s *AFPService) AddUser(username, password string) { - s.mu.Lock() - defer s.mu.Unlock() - s.users[username] = password +func (s *Service) AddUser(username, password string) { + s.sessions.addUser(username, password) } -func (s *AFPService) handleLogout(req *FPLogoutReq) (*FPLogoutRes, int32) { +func (s *Service) handleLogout(req *FPLogoutReq) (*FPLogoutRes, int32) { return &FPLogoutRes{}, NoErr } -func (s *AFPService) handleMapID(req *FPMapIDReq) (*FPMapIDRes, int32) { +func (s *Service) handleMapID(req *FPMapIDReq) (*FPMapIDRes, int32) { name := "root" if req.Function == 2 || req.Function == 4 { name = "wheel" @@ -88,6 +78,6 @@ func (s *AFPService) handleMapID(req *FPMapIDReq) (*FPMapIDRes, int32) { return &FPMapIDRes{Name: name}, NoErr } -func (s *AFPService) handleMapName(req *FPMapNameReq) (*FPMapNameRes, int32) { +func (s *Service) handleMapName(req *FPMapNameReq) (*FPMapNameRes, int32) { return &FPMapNameRes{ID: 0}, NoErr } diff --git a/service/afp/server_models.go b/service/afp/server_models.go index eaf8c8f..9ab6b3e 100644 --- a/service/afp/server_models.go +++ b/service/afp/server_models.go @@ -1,9 +1,14 @@ +//go:build afp || all + package afp import ( "bytes" "encoding/binary" "fmt" + "strings" + + "github.com/pgodw/omnitalk/pkg/binutil" ) // FPGetSrvrInfoReq - request to obtain a block of descriptive information @@ -50,58 +55,94 @@ type FPGetSrvrInfoRes struct { 
Flags uint16 } -func (res *FPGetSrvrInfoRes) Marshal() []byte { - baseOffset := 8 + 2 + 1 + len(res.ServerName) +// layout returns the offsets used by the GetSrvrInfo reply block, plus the +// total wire size. The fixed header is 4 × uint16 offsets + 1 × uint16 Flags +// = 10 bytes; the ServerName follows immediately as a Pascal string and is +// padded to an even boundary before the rest of the variable-length fields. +func (res *FPGetSrvrInfoRes) layout() (machineOff, versionsOff, uamsOff, total int) { + const headerLen = 10 // 4 offsets + Flags + baseOffset := headerLen + 1 + len(res.ServerName) if baseOffset%2 != 0 { baseOffset++ } - - machineOffset := baseOffset - machineLen := 1 + len(res.MachineType) - - versionsOffset := machineOffset + machineLen - + machineOff = baseOffset + versionsOff = machineOff + 1 + len(res.MachineType) versionsLen := 1 for _, v := range res.AFPVersions { versionsLen += 1 + len(v) } + uamsOff = versionsOff + versionsLen + uamsLen := 1 + for _, u := range res.UAMs { + uamsLen += 1 + len(u) + } + total = uamsOff + uamsLen + return +} - uamsOffset := versionsOffset + versionsLen - - iconOffset := 0 - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.BigEndian, uint16(machineOffset)) - binary.Write(buf, binary.BigEndian, uint16(versionsOffset)) - binary.Write(buf, binary.BigEndian, uint16(uamsOffset)) - binary.Write(buf, binary.BigEndian, uint16(iconOffset)) - - binary.Write(buf, binary.BigEndian, res.Flags) - - buf.WriteByte(byte(len(res.ServerName))) - buf.WriteString(res.ServerName) +// WireSize returns the encoded length of the reply block. +func (res *FPGetSrvrInfoRes) WireSize() int { + _, _, _, total := res.layout() + return total +} - for buf.Len() < machineOffset { - buf.WriteByte(0) +// MarshalWire encodes the reply block into b. Returns ErrShortBuffer if +// b is too small. 
+func (res *FPGetSrvrInfoRes) MarshalWire(b []byte) (int, error) { + machineOff, versionsOff, uamsOff, total := res.layout() + if len(b) < total { + return 0, binutil.ErrShortBuffer + } + // Zero the buffer first so the gap before machineOff (caused by the + // even-boundary pad after ServerName) is left as zero bytes. + for i := 0; i < total; i++ { + b[i] = 0 } - buf.WriteByte(byte(len(res.MachineType))) - buf.WriteString(res.MachineType) - - buf.WriteByte(byte(len(res.AFPVersions))) + off := 0 + n, _ := binutil.PutU16(b[off:], uint16(machineOff)) + off += n + n, _ = binutil.PutU16(b[off:], uint16(versionsOff)) + off += n + n, _ = binutil.PutU16(b[off:], uint16(uamsOff)) + off += n + n, _ = binutil.PutU16(b[off:], 0) // iconOffset + off += n + n, _ = binutil.PutU16(b[off:], res.Flags) + off += n + + n, _ = binutil.PutPString(b[off:], []byte(res.ServerName)) + off += n + + // Skip pad bytes (already zeroed) up to machineOff. + off = machineOff + + n, _ = binutil.PutPString(b[off:], []byte(res.MachineType)) + off += n + + b[off] = byte(len(res.AFPVersions)) + off++ for _, v := range res.AFPVersions { - buf.WriteByte(byte(len(v))) - buf.WriteString(v) + n, _ = binutil.PutPString(b[off:], []byte(v)) + off += n } - buf.WriteByte(byte(len(res.UAMs))) + b[off] = byte(len(res.UAMs)) + off++ for _, u := range res.UAMs { - buf.WriteByte(byte(len(u))) - buf.WriteString(u) + n, _ = binutil.PutPString(b[off:], []byte(u)) + off += n } - return buf.Bytes() + return off, nil +} + +// Marshal allocates a buffer and encodes the reply block. Prefer MarshalWire +// when the caller can supply a buffer. 
+func (res *FPGetSrvrInfoRes) Marshal() []byte { + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetSrvrInfoRes) String() string { @@ -147,20 +188,49 @@ const ( VolInfoFlagHasPassword uint8 = 1 << 0 ) -func (res *FPGetSrvrParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.ServerTime) - buf.WriteByte(uint8(len(res.Volumes))) +// WireSize returns the encoded length: 4-byte ServerTime + 1-byte volume +// count + per-volume (1-byte flags + 1-byte name len + name bytes, name +// truncated to 255). +func (res *FPGetSrvrParmsRes) WireSize() int { + n := 5 for _, v := range res.Volumes { - buf.WriteByte(v.Flags) nameLen := len(v.Name) if nameLen > 255 { nameLen = 255 } - buf.WriteByte(uint8(nameLen)) - buf.WriteString(v.Name[:nameLen]) + n += 2 + nameLen } - return buf.Bytes() + return n +} + +// MarshalWire encodes the reply block into b. +func (res *FPGetSrvrParmsRes) MarshalWire(b []byte) (int, error) { + if len(b) < res.WireSize() { + return 0, binutil.ErrShortBuffer + } + off := 0 + n, _ := binutil.PutU32(b[off:], res.ServerTime) + off += n + b[off] = uint8(len(res.Volumes)) + off++ + for _, v := range res.Volumes { + nameLen := len(v.Name) + if nameLen > 255 { + nameLen = 255 + } + b[off] = v.Flags + off++ + n, _ = binutil.PutPString(b[off:], []byte(v.Name[:nameLen])) + off += n + } + return off, nil +} + +// Marshal allocates a buffer and encodes the reply block. +func (res *FPGetSrvrParmsRes) Marshal() []byte { + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetSrvrParmsRes) Unmarshal(data []byte) error { @@ -283,11 +353,24 @@ type FPLoginRes struct { IDNumber uint16 } +// WireSize returns the fixed 4-byte size of the FPLoginRes block. +func (res *FPLoginRes) WireSize() int { return 4 } + +// MarshalWire encodes the reply block into b. 
+func (res *FPLoginRes) MarshalWire(b []byte) (int, error) { + if len(b) < 4 { + return 0, binutil.ErrShortBuffer + } + _, _ = binutil.PutU16(b[0:], res.SRefNum) + _, _ = binutil.PutU16(b[2:], res.IDNumber) + return 4, nil +} + +// Marshal allocates a buffer and encodes the reply block. func (res *FPLoginRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.SRefNum) - binary.Write(buf, binary.BigEndian, res.IDNumber) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPLoginRes) String() string { @@ -339,10 +422,29 @@ type FPMapIDRes struct { Name string } +// WireSize returns 1 byte for the length prefix plus the name length +// (truncated to 255 bytes per the Pascal-string convention). +func (res *FPMapIDRes) WireSize() int { + n := len(res.Name) + if n > 255 { + n = 255 + } + return 1 + n +} + +// MarshalWire encodes the reply block into b. +func (res *FPMapIDRes) MarshalWire(b []byte) (int, error) { + name := res.Name + if len(name) > 255 { + name = name[:255] + } + return binutil.PutPString(b, []byte(name)) +} + +// Marshal allocates a buffer and encodes the reply block. func (res *FPMapIDRes) Marshal() []byte { - b := make([]byte, 1+len(res.Name)) - b[0] = byte(len(res.Name)) - copy(b[1:], res.Name) + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) return b } @@ -375,9 +477,18 @@ type FPMapNameRes struct { ID uint32 } +// WireSize returns the fixed 4-byte ID length. +func (res *FPMapNameRes) WireSize() int { return 4 } + +// MarshalWire encodes the reply block into b. +func (res *FPMapNameRes) MarshalWire(b []byte) (int, error) { + return binutil.PutU32(b, res.ID) +} + +// Marshal allocates a buffer and encodes the reply block. 
func (res *FPMapNameRes) Marshal() []byte { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, res.ID) + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) return b } @@ -408,13 +519,40 @@ type FPGetSrvrMsgRes struct { Message string } +// WireSize returns 2-byte MessageType + 2-byte Bitmap + 1-byte length +// + Message bytes (truncated to 255). +func (res *FPGetSrvrMsgRes) WireSize() int { + n := len(res.Message) + if n > 255 { + n = 255 + } + return 5 + n +} + +// MarshalWire encodes the reply block into b. +func (res *FPGetSrvrMsgRes) MarshalWire(b []byte) (int, error) { + if len(b) < res.WireSize() { + return 0, binutil.ErrShortBuffer + } + off := 0 + n, _ := binutil.PutU16(b[off:], res.MessageType) + off += n + n, _ = binutil.PutU16(b[off:], res.Bitmap) + off += n + msg := res.Message + if len(msg) > 255 { + msg = msg[:255] + } + n, _ = binutil.PutPString(b[off:], []byte(msg)) + off += n + return off, nil +} + +// Marshal allocates a buffer and encodes the reply block. func (res *FPGetSrvrMsgRes) Marshal() []byte { - b := new(bytes.Buffer) - binary.Write(b, binary.BigEndian, res.MessageType) - binary.Write(b, binary.BigEndian, res.Bitmap) - b.WriteByte(byte(len(res.Message))) - b.WriteString(res.Message) - return b.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } func (res *FPGetSrvrMsgRes) String() string { @@ -426,16 +564,122 @@ type FPUnsupportedReq struct{} func (req *FPUnsupportedReq) Unmarshal(data []byte) error { return nil } func (req *FPUnsupportedReq) String() string { return "FPUnsupportedReq{}" } -// FPCatSearch - not supported; server returns ErrCallNotSupported. -type FPCatSearchReq struct{} +// FPCatSearch request (AFP 2.1). 
+type FPCatSearchReq struct { + VolumeID uint16 + ReqMatches int32 + Reserved uint32 + CatalogPosition [16]byte + FileRsltBitmap uint16 + DirectoryRsltBitmap uint16 + ReqBitmap uint32 + Parameters []byte +} + +func (req *FPCatSearchReq) Unmarshal(data []byte) error { + if len(data) < 36 { + return fmt.Errorf("ErrParamErr") + } + req.VolumeID = binary.BigEndian.Uint16(data[2:4]) + req.ReqMatches = int32(binary.BigEndian.Uint32(data[4:8])) + req.Reserved = binary.BigEndian.Uint32(data[8:12]) + copy(req.CatalogPosition[:], data[12:28]) + req.FileRsltBitmap = binary.BigEndian.Uint16(data[28:30]) + req.DirectoryRsltBitmap = binary.BigEndian.Uint16(data[30:32]) + req.ReqBitmap = binary.BigEndian.Uint32(data[32:36]) + if len(data) > 36 { + req.Parameters = append([]byte(nil), data[36:]...) + } else { + req.Parameters = nil + } + return nil +} + +func (req *FPCatSearchReq) String() string { + query := req.SearchQuery() + printable := req.searchPrintableParameters() + if len(printable) > 80 { + printable = printable[:80] + "..." 
+ } + return fmt.Sprintf("FPCatSearchReq{VolumeID:%d ReqMatches:%d FileRsltBitmap:%s DirectoryRsltBitmap:%s ReqBitmap:0x%08x ParamsLen:%d Query:%q Params:%q}", + req.VolumeID, + req.ReqMatches, + formatFileBitmap(req.FileRsltBitmap), + formatDirBitmap(req.DirectoryRsltBitmap), + req.ReqBitmap, + len(req.Parameters), + query, + printable, + ) +} -func (req *FPCatSearchReq) Unmarshal(data []byte) error { return nil } -func (req *FPCatSearchReq) String() string { return "FPCatSearchReq{}" } +func (req *FPCatSearchReq) SearchQuery() string { + if len(req.Parameters) == 0 { + return "" + } + return req.searchPrintableParameters() +} -type FPCatSearchRes struct{} +func (req *FPCatSearchReq) searchPrintableParameters() string { + b := make([]byte, 0, len(req.Parameters)) + for _, c := range req.Parameters { + if c >= 32 && c <= 126 { + b = append(b, c) + continue + } + if len(b) > 0 && b[len(b)-1] != ' ' { + b = append(b, ' ') + } + } + return strings.Join(strings.Fields(string(b)), " ") +} -func (res *FPCatSearchRes) Marshal() []byte { return nil } -func (res *FPCatSearchRes) String() string { return "FPCatSearchRes{}" } +type FPCatSearchRes struct { + CatalogPosition [16]byte + FileRsltBitmap uint16 + DirectoryRsltBitmap uint16 + ActualCount int32 + Data []byte +} + +// WireSize returns 16-byte CatalogPosition + 2-byte FileRsltBitmap + +// 2-byte DirectoryRsltBitmap + 4-byte ActualCount + Data bytes. +func (res *FPCatSearchRes) WireSize() int { + return 24 + len(res.Data) +} + +// MarshalWire encodes the reply block into b. 
+func (res *FPCatSearchRes) MarshalWire(b []byte) (int, error) { + if len(b) < res.WireSize() { + return 0, binutil.ErrShortBuffer + } + off := 0 + off += copy(b[off:], res.CatalogPosition[:]) + n, _ := binutil.PutU16(b[off:], res.FileRsltBitmap) + off += n + n, _ = binutil.PutU16(b[off:], res.DirectoryRsltBitmap) + off += n + n, _ = binutil.PutU32(b[off:], uint32(res.ActualCount)) + off += n + off += copy(b[off:], res.Data) + return off, nil +} + +// Marshal allocates a buffer and encodes the reply block. +func (res *FPCatSearchRes) Marshal() []byte { + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b +} + +func (res *FPCatSearchRes) String() string { + return fmt.Sprintf("FPCatSearchRes{FileRsltBitmap:%s DirectoryRsltBitmap:%s ActualCount:%d DataLen:%d}", + formatFileBitmap(res.FileRsltBitmap), + formatDirBitmap(res.DirectoryRsltBitmap), + res.ActualCount, + len(res.Data), + ) +} var ( _ RequestModel = (*FPGetSrvrInfoReq)(nil) diff --git a/service/afp/server_models_golden_test.go b/service/afp/server_models_golden_test.go new file mode 100644 index 0000000..e3bac0e --- /dev/null +++ b/service/afp/server_models_golden_test.go @@ -0,0 +1,157 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "encoding/hex" + "flag" + "os" + "path/filepath" + "testing" +) + +var updateGolden = flag.Bool("update", false, "regenerate golden files in testdata/") + +// goldenBytes loads the named hex golden, or rewrites it from got when -update +// is set. Hex format: whitespace-tolerant lowercase pairs (the file is meant to +// be human-readable, e.g. via `xxd -r -p`). 
+func goldenBytes(t *testing.T, name string, got []byte) []byte { + t.Helper() + path := filepath.Join("testdata", name) + if *updateGolden { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + t.Fatalf("mkdir testdata: %v", err) + } + if err := os.WriteFile(path, []byte(hex.EncodeToString(got)+"\n"), 0o644); err != nil { + t.Fatalf("write golden: %v", err) + } + return got + } + raw, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read golden %s (run with -update to create): %v", path, err) + } + stripped := make([]byte, 0, len(raw)) + for _, b := range raw { + if b == ' ' || b == '\n' || b == '\r' || b == '\t' { + continue + } + stripped = append(stripped, b) + } + want, err := hex.DecodeString(string(stripped)) + if err != nil { + t.Fatalf("decode golden %s: %v", path, err) + } + return want +} + +// TestFPMapIDRes_MarshalGolden pins the wire-format output. +func TestFPMapIDRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPMapIDRes{Name: "alice"} + got := res.Marshal() + want := goldenBytes(t, "fpmapidres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPMapNameRes_MarshalGolden pins the wire-format output. +func TestFPMapNameRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPMapNameRes{ID: 0x01020304} + got := res.Marshal() + want := goldenBytes(t, "fpmapnameres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPGetSrvrMsgRes_MarshalGolden pins the wire-format output. 
+func TestFPGetSrvrMsgRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetSrvrMsgRes{MessageType: 1, Bitmap: 3, Message: "Welcome to OmniTalk"} + got := res.Marshal() + want := goldenBytes(t, "fpgetsrvrmsgres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPCatSearchRes_MarshalGolden pins the wire-format output. +func TestFPCatSearchRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPCatSearchRes{ + CatalogPosition: [16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}, + FileRsltBitmap: 0xAABB, + DirectoryRsltBitmap: 0xCCDD, + ActualCount: 42, + Data: []byte("payload bytes"), + } + got := res.Marshal() + want := goldenBytes(t, "fpcatsearchres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPGetSrvrParmsRes_MarshalGolden pins the wire-format output of +// FPGetSrvrParmsRes.Marshal. Also asserts Marshal/Unmarshal round-trips. 
+func TestFPGetSrvrParmsRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetSrvrParmsRes{ + ServerTime: 0xDEADBEEF, + Volumes: []VolInfo{ + {Flags: VolInfoFlagHasPassword, Name: "Macintosh HD"}, + {Flags: 0, Name: "Public"}, + }, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetsrvrparmsres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } + var rt FPGetSrvrParmsRes + if err := rt.Unmarshal(got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if rt.ServerTime != res.ServerTime || len(rt.Volumes) != len(res.Volumes) { + t.Fatalf("round-trip mismatch: got %+v, want %+v", rt, *res) + } + for i := range rt.Volumes { + if rt.Volumes[i] != res.Volumes[i] { + t.Fatalf("vol[%d]: got %+v, want %+v", i, rt.Volumes[i], res.Volumes[i]) + } + } +} + +// TestFPLoginRes_MarshalGolden pins the wire-format output of FPLoginRes.Marshal. +func TestFPLoginRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPLoginRes{SRefNum: 0x1234, IDNumber: 0x5678} + got := res.Marshal() + want := goldenBytes(t, "fploginres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPGetSrvrInfoRes_MarshalGolden pins the current wire-format output of +// FPGetSrvrInfoRes.Marshal so a future migration to MarshalWire/UnmarshalWire +// (Step 14) can be validated by diff. Run with -update to regenerate. 
+func TestFPGetSrvrInfoRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetSrvrInfoRes{ + MachineType: "OmniTalk", + AFPVersions: []string{"AFPVersion 1.1", "AFPVersion 2.0", "AFPVersion 2.1"}, + UAMs: []string{"No User Authent", "Cleartxt Passwrd"}, + ServerName: "Test Server", + Flags: 0x8000, + } + got := res.Marshal() + want := goldenBytes(t, "fpgetsrvrinfores_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/server_test.go b/service/afp/server_test.go index 85c81a5..03b72b0 100644 --- a/service/afp/server_test.go +++ b/service/afp/server_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -13,7 +15,7 @@ import ( ) func TestAFP_FPGetSrvrParms(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/tmp/vol1"}, {Name: "Vol2", Path: "/tmp/vol2"}, }, nil, nil) // no need for real FS for this test @@ -75,7 +77,7 @@ func TestAFP_FPGetSrvrParms(t *testing.T) { } func TestAFP_FPGetSrvrParms_NoPerEntryPadding(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Test Volume", Path: "/tmp/test"}, {Name: "Volume 68K", Path: "/tmp/68k"}, }, nil, nil) @@ -118,8 +120,103 @@ func TestAFP_FPGetSrvrParms_NoPerEntryPadding(t *testing.T) { } } +func TestAFP_PersistentVolumeIDs_AreDeterministicByName(t *testing.T) { + configs := []VolumeConfig{ + {Name: "Archive", Path: t.TempDir()}, + {Name: "Games", Path: t.TempDir()}, + } + opts := DefaultOptions() + opts.PersistentVolumeIDs = true + + s1 := NewService("TestServer", configs, nil, nil, opts) + s2 := NewService("TestServer", configs, nil, nil, opts) + + if len(s1.Volumes) != len(s2.Volumes) { + t.Fatalf("volume count mismatch: %d vs %d", len(s1.Volumes), len(s2.Volumes)) + } + for i := range s1.Volumes { + if s1.Volumes[i].ID == 0 { + t.Fatalf("volume 
%q has zero ID", s1.Volumes[i].Config.Name) + } + if s1.Volumes[i].ID != s2.Volumes[i].ID { + t.Fatalf("volume %q ID mismatch across instances: %d vs %d", s1.Volumes[i].Config.Name, s1.Volumes[i].ID, s2.Volumes[i].ID) + } + } +} + +func TestAFP_PersistentVolumeIDs_ResolveNameCollisions(t *testing.T) { + configs := []VolumeConfig{ + {Name: "Shared", Path: filepath.Join(t.TempDir(), "a")}, + {Name: "Shared", Path: filepath.Join(t.TempDir(), "b")}, + } + opts := DefaultOptions() + opts.PersistentVolumeIDs = true + + s := NewService("TestServer", configs, nil, nil, opts) + if len(s.Volumes) != 2 { + t.Fatalf("expected 2 volumes, got %d", len(s.Volumes)) + } + if s.Volumes[0].ID == s.Volumes[1].ID { + t.Fatalf("expected unique IDs for colliding names, got %d", s.Volumes[0].ID) + } +} + +func TestAFP_PersistentVolumeIDs_AreReturnedByOpenVol(t *testing.T) { + root := t.TempDir() + opts := DefaultOptions() + opts.PersistentVolumeIDs = true + + s := NewService("TestServer", []VolumeConfig{{Name: "Archive", Path: root}}, &LocalFileSystem{}, nil, opts) + if len(s.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %d", len(s.Volumes)) + } + wantID := s.Volumes[0].ID + + res, errCode := s.handleOpenVol(&FPOpenVolReq{Bitmap: VolBitmapVolID, VolName: "Archive"}) + if errCode != NoErr { + t.Fatalf("handleOpenVol errCode=%d, want %d", errCode, NoErr) + } + if res.Bitmap&VolBitmapVolID == 0 { + t.Fatalf("response bitmap missing VolID bit: %#04x", res.Bitmap) + } + if len(res.Data) < 2 { + t.Fatalf("response data too short: %d", len(res.Data)) + } + gotID := binary.BigEndian.Uint16(res.Data[:2]) + if gotID != wantID { + t.Fatalf("openvol returned VolumeID=%d, want %d", gotID, wantID) + } +} + +func TestAFP_PersistentVolumeIDs_AreReturnedByGetVolParms(t *testing.T) { + root := t.TempDir() + opts := DefaultOptions() + opts.PersistentVolumeIDs = true + + s := NewService("TestServer", []VolumeConfig{{Name: "Archive", Path: root}}, &LocalFileSystem{}, nil, opts) + if len(s.Volumes) != 1 { 
+ t.Fatalf("expected 1 volume, got %d", len(s.Volumes)) + } + wantID := s.Volumes[0].ID + + res, errCode := s.handleGetVolParms(&FPGetVolParmsReq{VolumeID: wantID, Bitmap: VolBitmapVolID}) + if errCode != NoErr { + t.Fatalf("handleGetVolParms errCode=%d, want %d", errCode, NoErr) + } + if res.Bitmap&VolBitmapVolID == 0 { + t.Fatalf("response bitmap missing VolID bit: %#04x", res.Bitmap) + } + if len(res.Data) < 2 { + t.Fatalf("response data too short: %d", len(res.Data)) + } + gotID := binary.BigEndian.Uint16(res.Data[:2]) + if gotID != wantID { + t.Fatalf("getvolparms returned VolumeID=%d, want %d", gotID, wantID) + } +} + func TestAFP_FPGetSrvrParms_VolumeFlags(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "ReadOnly", Path: "/tmp/ro", ReadOnly: true}, {Name: "Protected", Path: "/tmp/pw", Password: "secret"}, {Name: "Both", Path: "/tmp/both", Password: "secret", ReadOnly: true}, @@ -151,7 +248,7 @@ func TestAFP_FPGetSrvrParms_VolumeFlags(t *testing.T) { } func TestAFP_GetVolParms_AttributesReadOnlyBitOnly(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "RW", Path: "/tmp/rw"}, {Name: "RO", Path: "/tmp/ro", ReadOnly: true}, }, nil, nil) @@ -182,7 +279,7 @@ func TestAFP_GetVolParms_AttributesReadOnlyBitOnly(t *testing.T) { } func TestAFP_OtherMethods(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/tmp/vol1"}, }, nil, nil) @@ -328,9 +425,35 @@ func (m *mockFS) OpenFile(name string, flag int) (File, error) { func (m *mockFS) Rename(oldpath, newpath string) error { return nil } +func (m *mockFS) Capabilities() FileSystemCapabilities { + return FileSystemCapabilities{ + ReadDirRange: true, + ChildCount: true, + DirAttributes: true, + ReadOnlyState: true, + } +} +func (m *mockFS) CatSearch(volumeRoot string, query string, reqMatches 
int32, cursor [16]byte) ([]string, [16]byte, int32) { + return nil, cursor, ErrCallNotSupported +} +func (m *mockFS) ChildCount(path string) (uint16, error) { + return 0, newNotSupported("ChildCount") +} +func (m *mockFS) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + return nil, 0, newNotSupported("ReadDirRange") +} +func (m *mockFS) DirAttributes(path string) (uint16, error) { + return 0, nil +} +func (m *mockFS) IsReadOnly(path string) (bool, error) { + return false, nil +} +func (m *mockFS) SupportsCatSearch(path string) (bool, error) { + return false, nil +} func TestAFP_FSDependentMethods(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/tmp/vol1"}, }, &mockFS{t: t}, nil) @@ -430,7 +553,7 @@ func TestAFP_GetVolParms_ModDateBytesFreeWireLayout(t *testing.T) { t.Fatalf("Chtimes(root): %v", err) } - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol1", Path: root}}, &mockFS{ + s := NewService("TestServer", []VolumeConfig{{Name: "Vol1", Path: root}}, &mockFS{ t: t, totalBytes: uint64(math.MaxUint32) + 12345, freeBytes: uint64(math.MaxUint32) + 99, @@ -463,7 +586,7 @@ func TestAFP_GetVolParms_ModDateBytesFreeWireLayout(t *testing.T) { } func TestAFP_OpenVolPasswordEnforcement(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: "/tmp/vol1", Password: "secret"}, }, &mockFS{t: t}, nil) @@ -529,7 +652,7 @@ func TestMemoryCNIDStore_ReservedIDs(t *testing.T) { } func TestGetPathDID_RoundTrip(t *testing.T) { - s := NewAFPService("TestServer", []VolumeConfig{ + s := NewService("TestServer", []VolumeConfig{ {Name: "Vol1", Path: filepath.Join("/volumes", "share")}, }, nil, nil) const volumeID = uint16(1) @@ -584,7 +707,7 @@ func TestGetPathDID_RoundTrip(t *testing.T) { func TestGetPathDID_RenamePreservesCNID(t *testing.T) { root := t.TempDir() - s 
:= NewAFPService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) const volumeID = uint16(1) oldPath := filepath.Join(root, "SimpleText") @@ -618,7 +741,7 @@ func TestAFP_ByteRangeLock_TrashUsageMapInitFlow(t *testing.T) { pathTypeAFP = 2 // long names ) - s := NewAFPService("TestServer", []VolumeConfig{{Name: volName, Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: volName, Path: root}}, &LocalFileSystem{}, nil) if _, errCode := s.handleOpenVol(&FPOpenVolReq{Bitmap: VolBitmapVolID, VolName: volName}); errCode != NoErr { t.Fatalf("OpenVol failed: got %d", errCode) @@ -734,7 +857,7 @@ func TestAFP_ByteRangeLock_TrashUsageMapInitFlow(t *testing.T) { func TestAFP_ByteRangeLock_ErrorSemantics(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) if _, errCode := s.handleOpenVol(&FPOpenVolReq{Bitmap: VolBitmapVolID, VolName: "Mac"}); errCode != NoErr { t.Fatalf("OpenVol failed: got %d", errCode) @@ -807,8 +930,8 @@ func TestAFP_ByteRangeLock_ErrorSemantics(t *testing.T) { func TestAFP_ByteRangeLock_NoMoreLocks(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) - s.maxLocks = 1 + s := NewService("TestServer", []VolumeConfig{{Name: "Mac", Path: root}}, &LocalFileSystem{}, nil) + s.forks.maxLocks = 1 if _, errCode := s.handleOpenVol(&FPOpenVolReq{Bitmap: VolBitmapVolID, VolName: "Mac"}); errCode != NoErr { t.Fatalf("OpenVol failed: got %d", errCode) diff --git a/service/afp/session_state.go b/service/afp/session_state.go new file mode 100644 index 0000000..b008824 --- /dev/null +++ b/service/afp/session_state.go @@ -0,0 +1,47 @@ 
+//go:build afp || all + +package afp + +import "sync" + +// sessionState owns the small set of fields used by Login / AddUser to +// authenticate clients and hand out session reference numbers. Carved out of +// Service so that auth-path code paths do not contend with fork, desktop, or +// volume state under a single shared mutex. +type sessionState struct { + mu sync.Mutex + users map[string]string // map[username]password + nextSRef uint16 +} + +func newSessionState() sessionState { + return sessionState{ + users: make(map[string]string), + nextSRef: 1, + } +} + +// allocSRef returns the next session reference number. +func (s *sessionState) allocSRef() uint16 { + s.mu.Lock() + defer s.mu.Unlock() + n := s.nextSRef + s.nextSRef++ + return n +} + +// checkPassword returns true when the supplied credentials match a registered +// user. An unknown username yields false without distinguishing it from a +// password mismatch. +func (s *sessionState) checkPassword(username, password string) bool { + s.mu.Lock() + defer s.mu.Unlock() + expected, ok := s.users[username] + return ok && expected == password +} + +func (s *sessionState) addUser(username, password string) { + s.mu.Lock() + defer s.mu.Unlock() + s.users[username] = password +} diff --git a/service/afp/sqlite_store.go b/service/afp/sqlite_store.go deleted file mode 100644 index 925869c..0000000 --- a/service/afp/sqlite_store.go +++ /dev/null @@ -1,50 +0,0 @@ -package afp - -import ( - "database/sql" - "fmt" - "os" - "path/filepath" - - "github.com/pgodw/omnitalk/go/netlog" - _ "modernc.org/sqlite" -) - -const afpSQLiteFilename = "_.afp.db" - -func sqliteDBPath(volumeRootPath string) string { - return filepath.Join(filepath.Clean(volumeRootPath), afpSQLiteFilename) -} - -func openSQLiteDB(volumeRootPath string) (*sql.DB, error) { - dbPath := sqliteDBPath(volumeRootPath) - if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil { - return nil, fmt.Errorf("create sqlite dir for %q: %w", dbPath, err) - } - 
db, err := sql.Open("sqlite", dbPath) - if err != nil { - return nil, fmt.Errorf("open sqlite db %q: %w", dbPath, err) - } - // Single-writer access pattern avoids lock contention and keeps behavior - // deterministic across concurrent AFP operations. - db.SetMaxOpenConns(1) - // Do not retain idle connections so temp-volume DB files are not held open - // on Windows between AFP operations. - db.SetMaxIdleConns(0) - - pragmas := []string{ - "PRAGMA journal_mode=WAL", - "PRAGMA synchronous=NORMAL", - "PRAGMA foreign_keys=ON", - "PRAGMA busy_timeout=5000", - } - for _, stmt := range pragmas { - if _, execErr := db.Exec(stmt); execErr != nil { - db.Close() - return nil, fmt.Errorf("sqlite pragma %q on %q: %w", stmt, dbPath, execErr) - } - } - - netlog.Info("[AFP][SQLite] opened %q", dbPath) - return db, nil -} diff --git a/service/afp/testdata/fpbyterangelockres_basic.hex b/service/afp/testdata/fpbyterangelockres_basic.hex new file mode 100644 index 0000000..4e55497 --- /dev/null +++ b/service/afp/testdata/fpbyterangelockres_basic.hex @@ -0,0 +1 @@ +0badf00d diff --git a/service/afp/testdata/fpcatsearchres_basic.hex b/service/afp/testdata/fpcatsearchres_basic.hex new file mode 100644 index 0000000..2d7b450 --- /dev/null +++ b/service/afp/testdata/fpcatsearchres_basic.hex @@ -0,0 +1 @@ +0102030405060708090a0b0c0d0e0f10aabbccdd0000002a7061796c6f6164206279746573 diff --git a/service/afp/testdata/fpcreatedirres_basic.hex b/service/afp/testdata/fpcreatedirres_basic.hex new file mode 100644 index 0000000..5ca31d5 --- /dev/null +++ b/service/afp/testdata/fpcreatedirres_basic.hex @@ -0,0 +1 @@ +deadbeef diff --git a/service/afp/testdata/fpenumerateres_basic.hex b/service/afp/testdata/fpenumerateres_basic.hex new file mode 100644 index 0000000..1e0ddd7 --- /dev/null +++ b/service/afp/testdata/fpenumerateres_basic.hex @@ -0,0 +1 @@ +07fb0dff0003656e756d65726174652d7061796c6f6164 diff --git a/service/afp/testdata/fpgetapplres_basic.hex 
b/service/afp/testdata/fpgetapplres_basic.hex new file mode 100644 index 0000000..91fdc53 --- /dev/null +++ b/service/afp/testdata/fpgetapplres_basic.hex @@ -0,0 +1 @@ +07fbdeadbeef0102030405060708 diff --git a/service/afp/testdata/fpgetcommentres_basic.hex b/service/afp/testdata/fpgetcommentres_basic.hex new file mode 100644 index 0000000..2f2f7f4 --- /dev/null +++ b/service/afp/testdata/fpgetcommentres_basic.hex @@ -0,0 +1 @@ +0f48656c6c6f2c20636f6d6d656e7421 diff --git a/service/afp/testdata/fpgetdirparmsres_basic.hex b/service/afp/testdata/fpgetdirparmsres_basic.hex new file mode 100644 index 0000000..684ea53 --- /dev/null +++ b/service/afp/testdata/fpgetdirparmsres_basic.hex @@ -0,0 +1 @@ +0dff8000deadbeef diff --git a/service/afp/testdata/fpgetfiledirparmsres_dir.hex b/service/afp/testdata/fpgetfiledirparmsres_dir.hex new file mode 100644 index 0000000..6adf307 --- /dev/null +++ b/service/afp/testdata/fpgetfiledirparmsres_dir.hex @@ -0,0 +1 @@ +07fb0dff800011223344 diff --git a/service/afp/testdata/fpgetfiledirparmsres_file.hex b/service/afp/testdata/fpgetfiledirparmsres_file.hex new file mode 100644 index 0000000..07c6589 --- /dev/null +++ b/service/afp/testdata/fpgetfiledirparmsres_file.hex @@ -0,0 +1 @@ +07fb0dff0000aabbcc diff --git a/service/afp/testdata/fpgetfileparmsres_basic.hex b/service/afp/testdata/fpgetfileparmsres_basic.hex new file mode 100644 index 0000000..0d0bf25 --- /dev/null +++ b/service/afp/testdata/fpgetfileparmsres_basic.hex @@ -0,0 +1 @@ +07fb0000cafebabe diff --git a/service/afp/testdata/fpgetforkparmsres_basic.hex b/service/afp/testdata/fpgetforkparmsres_basic.hex new file mode 100644 index 0000000..4ed2091 --- /dev/null +++ b/service/afp/testdata/fpgetforkparmsres_basic.hex @@ -0,0 +1 @@ +06000000100000002000 diff --git a/service/afp/testdata/fpgetsrvrinfores_basic.hex b/service/afp/testdata/fpgetsrvrinfores_basic.hex new file mode 100644 index 0000000..725b3c6 --- /dev/null +++ b/service/afp/testdata/fpgetsrvrinfores_basic.hex @@ 
-0,0 +1 @@ +0016001f004d000080000b5465737420536572766572084f6d6e6954616c6b030e41465056657273696f6e20312e310e41465056657273696f6e20322e300e41465056657273696f6e20322e31020f4e6f20557365722041757468656e7410436c6561727478742050617373777264 diff --git a/service/afp/testdata/fpgetsrvrmsgres_basic.hex b/service/afp/testdata/fpgetsrvrmsgres_basic.hex new file mode 100644 index 0000000..b5c691f --- /dev/null +++ b/service/afp/testdata/fpgetsrvrmsgres_basic.hex @@ -0,0 +1 @@ +000100031357656c636f6d6520746f204f6d6e6954616c6b diff --git a/service/afp/testdata/fpgetsrvrparmsres_basic.hex b/service/afp/testdata/fpgetsrvrparmsres_basic.hex new file mode 100644 index 0000000..2793e8e --- /dev/null +++ b/service/afp/testdata/fpgetsrvrparmsres_basic.hex @@ -0,0 +1 @@ +deadbeef02010c4d6163696e746f736820484400065075626c6963 diff --git a/service/afp/testdata/fpgetvolparmsres_basic.hex b/service/afp/testdata/fpgetvolparmsres_basic.hex new file mode 100644 index 0000000..723dc4a --- /dev/null +++ b/service/afp/testdata/fpgetvolparmsres_basic.hex @@ -0,0 +1 @@ +beef766f6c7061726d732d7061796c6f6164 diff --git a/service/afp/testdata/fploginres_basic.hex b/service/afp/testdata/fploginres_basic.hex new file mode 100644 index 0000000..97b5955 --- /dev/null +++ b/service/afp/testdata/fploginres_basic.hex @@ -0,0 +1 @@ +12345678 diff --git a/service/afp/testdata/fpmapidres_basic.hex b/service/afp/testdata/fpmapidres_basic.hex new file mode 100644 index 0000000..e0928b9 --- /dev/null +++ b/service/afp/testdata/fpmapidres_basic.hex @@ -0,0 +1 @@ +05616c696365 diff --git a/service/afp/testdata/fpmapnameres_basic.hex b/service/afp/testdata/fpmapnameres_basic.hex new file mode 100644 index 0000000..e626597 --- /dev/null +++ b/service/afp/testdata/fpmapnameres_basic.hex @@ -0,0 +1 @@ +01020304 diff --git a/service/afp/testdata/fpopendirres_basic.hex b/service/afp/testdata/fpopendirres_basic.hex new file mode 100644 index 0000000..2562492 --- /dev/null +++ b/service/afp/testdata/fpopendirres_basic.hex 
@@ -0,0 +1 @@ +cafef00d diff --git a/service/afp/testdata/fpopendtres_basic.hex b/service/afp/testdata/fpopendtres_basic.hex new file mode 100644 index 0000000..ea17b16 --- /dev/null +++ b/service/afp/testdata/fpopendtres_basic.hex @@ -0,0 +1 @@ +cafe diff --git a/service/afp/testdata/fpopenforkres_basic.hex b/service/afp/testdata/fpopenforkres_basic.hex new file mode 100644 index 0000000..8244392 --- /dev/null +++ b/service/afp/testdata/fpopenforkres_basic.hex @@ -0,0 +1 @@ +07fb1234deadbeef diff --git a/service/afp/testdata/fpopenvolres_basic.hex b/service/afp/testdata/fpopenvolres_basic.hex new file mode 100644 index 0000000..8364a6f --- /dev/null +++ b/service/afp/testdata/fpopenvolres_basic.hex @@ -0,0 +1 @@ +1234aabbccddee diff --git a/service/afp/testdata/fpwriteres_basic.hex b/service/afp/testdata/fpwriteres_basic.hex new file mode 100644 index 0000000..97b5955 --- /dev/null +++ b/service/afp/testdata/fpwriteres_basic.hex @@ -0,0 +1 @@ +12345678 diff --git a/service/afp/transport.go b/service/afp/transport.go index 8fabe6e..5da5895 100644 --- a/service/afp/transport.go +++ b/service/afp/transport.go @@ -1,9 +1,14 @@ +//go:build afp || all + package afp import ( - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "context" + + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) // CommandHandler handles decoded AFP commands from transport protocols. @@ -15,12 +20,19 @@ type CommandHandler interface { // Transport represents a network transport that serves the AFP protocol (e.g., ASP over DDP, or DSI over TCP/IP). type Transport interface { // Start starts the transport using the provided router (for AppleTalk NBP/routing). - Start(router service.Router) error + Start(ctx context.Context, router service.Router) error // Stop shuts down the transport and cleans up any resources. 
Stop() error // Inbound processes an incoming AppleTalk datagram, if the transport uses DDP. // For IP-only transports, this can be a no-op. - Inbound(d appletalk.Datagram, p port.Port) + Inbound(d ddp.Datagram, p port.Port) + + // MaxReadSize returns the largest single-reply payload the transport can + // deliver, used by AFP to cap FPRead ReqCount and any range-limited + // filesystem fetches. Transports without a fixed limit return 0. + // Called by AFP after the transport has resolved its quantum (e.g. ASP + // after SPGetParms); MaxReadSize before that point may return 0. + MaxReadSize() int } diff --git a/service/afp/types.go b/service/afp/types.go index f5c58dc..f0b6b33 100644 --- a/service/afp/types.go +++ b/service/afp/types.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp // Debug enables debug logging for AFP server. @@ -138,7 +140,20 @@ const ( // attributes word; only the ReadOnly flag (bit 0) is defined here. const ( // VolAttrReadOnly indicates the volume is read-only (bit 0). - VolAttrReadOnly uint16 = 1 << 0 + VolAttrReadOnly uint16 = 1 << 0 + VolAttrVolumePassword uint16 = 0x02 + VolAttrSupportsFileIDs uint16 = 0x04 + VolAttrSupportsCatSearch uint16 = 0x08 + VolAttrSupportsBlankAccessPrivs uint16 = 0x10 + VolAttrSupportsUnixPrivs uint16 = 0x20 + VolAttrSupportsUTF8Names uint16 = 0x40 + VolAttrNoNetworkUserIDs uint16 = 0x80 + VolAttrDefaultPrivsFromParent uint16 = 0x100 + VolAttrNoExchangeFiles uint16 = 0x200 + VolAttrSupportsExtAttrs uint16 = 0x400 + VolAttrSupportsACLs uint16 = 0x800 + VolAttrCaseSensitive uint16 = 0x1000 + VolAttrSupportsTMLockSteal uint16 = 0x2000 ) // File and directory attribute flags returned in the Attributes field @@ -177,3 +192,86 @@ const ( // Context comments preserved as aliases where the semantic note is useful. ErrObjectExistsSoftCreate int32 = ErrObjectExists // soft-create failed because object already exists ) + +// AFP Commands. +// Inside Macintosh: Networking. 
+const ( + FPByteRangeLock = 1 // lock byte ranges in an open fork. + FPCloseVol = 2 // notify server that a workstation no longer needs a volume. + FPCloseDir = 3 // close a directory on a variable Directory ID volume. + FPCloseFork = 4 // close an open fork. + FPCopyFile = 5 // copy a file from one server volume to another. + FPCreateDir = 6 // create a new directory. + FPCreateFile = 7 // create a new file. + FPDelete = 8 // delete a file or empty directory. + FPEnumerate = 9 // list files and directories within a directory. + FPFlush = 10 // flush data associated with a volume to disk. + FPFlushFork = 11 // write an open fork's internal buffers to disk. + FPGetDirParms = 12 + FPGetFileParms = 13 + FPGetForkParms = 14 // read an open fork's parameters. + FPGetSrvrInfo = 15 // get server information (name, version strings, UAMs, flags) without opening a session. + FPGetSrvrParms = 16 // get list of server volumes after a session is established. + FPGetVolParms = 17 // get parameters for a given volume. + FPLogin = 18 // authenticate user and establish a session. + FPLoginCont = 19 // continue multi-step user authentication process. + FPLogout = 20 // terminate an AFP session. + FPMapID = 21 // map user or group ID to the corresponding name. + FPMapName = 22 // map user or group name to the corresponding ID. + FPMoveAndRename = 23 // move and optionally rename a file or directory to a different parent directory. + FPOpenVol = 24 // request access to a volume, optionally providing a password. + FPOpenDir = 25 // open a directory on a variable Directory ID volume to retrieve its Directory ID. + FPOpenFork = 26 // open a data or resource fork of an existing file. + FPGetSrvrMsg = 38 + FPRead = 27 // read data from an open fork. + FPRename = 28 // rename a file or directory. + FPSetDirParms = 29 // change parameters of a specified directory. + FPSetFileParms = 30 // change parameters of a specified file. + FPSetForkParms = 31 // change parameters of an open fork. 
+ FPSetVolParms = 32 // change parameters of a specified volume. + FPWrite = 33 // write data to an open fork. + FPGetFileDirParms = 34 // get parameters associated with a given file or directory. + FPSetFileDirParms = 35 // set parameters common to both files and directories. + FPChangePassword = 36 // change a user's password. + FPGetUserInfo = 37 // retrieve information about a user (AFP 2.0+). + + // AFP 2.2 additions. + FPExchangeFiles = 42 + + // AFP 2.1 catalogued search. + FPCatSearch = 43 + + // AFP 2.0+ Desktop Database commands (Inside Macintosh: Networking §C). + // Finder uses these to store/retrieve icons, application mappings, and comments. + FPOpenDT = 48 // open the Desktop database for access. + FPCloseDT = 49 // close access to the Desktop database. + FPGetIcon = 51 // retrieve a specific icon bitmap from the Desktop database. + FPGetIconInfo = 52 // get description or determine set of icons for an application. + FPAddAPPL = 53 // register an application mapping (APPL) in the Desktop database. + FPRemoveAPPL = 54 // remove an application mapping from the Desktop database. + FPGetAPPL = 55 // get an application mapping from the Desktop database. + FPAddComment = 56 // add or replace a Finder comment for a file or directory. + FPRemoveComment = 57 // remove a Finder comment for a file or directory. + FPGetComment = 58 // retrieve a Finder comment for a file or directory. + FPAddIcon = 192 // add a new icon bitmap to the Desktop database. (special: maps to ASPUserWrite) +) + +// forkHandle tracks an open fork (data or resource). 
+type forkHandle struct { + file File // nil for an empty resource fork + isRsrc bool + rsrcOff int64 // offset within the AppleDouble file where resource data starts + rsrcLen int64 // current length of resource fork data + rsrcLenFieldAt int64 // file offset of the ResourceFork entry's length field in the AppleDouble header + filePath string // absolute path of the file whose fork is open + volID uint16 // volume this fork belongs to +} + +type byteRangeLock struct { + lockKey string + ownerFork uint16 + start int64 + length int64 // -1 means open-ended (to EOF) +} + +const defaultMaxByteRangeLocks = 4096 diff --git a/service/afp/volume.go b/service/afp/volume.go index 0599b7c..aec8658 100644 --- a/service/afp/volume.go +++ b/service/afp/volume.go @@ -1,12 +1,18 @@ +//go:build afp || all + package afp import ( "bytes" - "encoding/binary" - "log" + "fmt" + "hash/crc32" "math" "path/filepath" + "strings" "time" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/binutil" ) const ( @@ -14,6 +20,68 @@ const ( defaultAFPBytesTotal = uint64(0x20000000) ) +// installVolumes builds per-volume state from VolumeConfigs: assigns the +// volume ID, opens the CNID store, resolves the FileSystem backend, and +// wires the AppleDouble metadata backend. fallbackFS, when non-nil, wins +// over the per-volume registry lookup (used by tests that inject a single +// shared FileSystem). 
+func (s *Service) installVolumes(configs []VolumeConfig, fallbackFS FileSystem) { + cnidBackend := resolveCNIDBackend(s.options) + usedVolumeIDs := make(map[uint16]struct{}, len(configs)) + + for i, cfg := range configs { + volume := Volume{ + Config: cfg, + ID: s.assignVolumeID(cfg, i, usedVolumeIDs), + } + s.Volumes = append(s.Volumes, volume) + + store := cnidBackend.Open(volume) + store.EnsureReserved(filepath.Clean(cfg.Path), CNIDRoot) + s.cnidStores[volume.ID] = store + + s.volumeFS[volume.ID] = resolveVolumeFS(cfg, fallbackFS) + s.installAppleDoubleBackend(volume.ID, cfg, fallbackFS) + } +} + +func (s *Service) assignVolumeID(cfg VolumeConfig, i int, used map[uint16]struct{}) uint16 { + if s.options.PersistentVolumeIDs { + return persistentVolumeIDForConfig(cfg, used) + } + id := uint16(i + 1) + used[id] = struct{}{} + return id +} + +func resolveVolumeFS(cfg VolumeConfig, fallbackFS FileSystem) FileSystem { + if fallbackFS != nil { + return fallbackFS + } + if backend, err := newBackendForVolumeConfig(cfg); err == nil { + return backend + } + return nil +} + +func (s *Service) installAppleDoubleBackend(volID uint16, cfg VolumeConfig, fallbackFS FileSystem) { + if s.metas == nil { + return + } + metaFS := s.volumeFS[volID] + if metaFS == nil { + metaFS = fallbackFS + } + if metaFS == nil { + return + } + mode := cfg.AppleDoubleMode + if mode == "" { + mode = s.options.AppleDoubleMode + } + s.metas[volID] = NewAppleDoubleBackend(metaFS, mode, s.options.DecomposedFilenames) +} + func constrainAFPVolumeType(volType uint16) uint16 { switch volType { case AFPVolumeTypeFlat, AFPVolumeTypeFixedDirID, AFPVolumeTypeVariableDirID: @@ -23,7 +91,7 @@ func constrainAFPVolumeType(volType uint16) uint16 { } } -func (s *AFPService) volumeType(_ *Volume) uint16 { +func (s *Service) volumeType(_ *Volume) uint16 { // OmniTalk exposes hierarchical volumes with CNID-based directory IDs, // so we advertise Variable Directory ID semantics. 
return constrainAFPVolumeType(AFPVolumeTypeFixedDirID) @@ -36,12 +104,32 @@ func capAFPBytes32(v uint64) uint32 { return uint32(v) } -func (s *AFPService) handleCloseVol(req *FPCloseVolReq) (*FPCloseVolRes, int32) { - log.Printf("[AFP] FPCloseVol for Volume ID %d", req.VolumeID) +func (s *Service) volumeAttributes(vol *Volume) uint16 { + if vol == nil { + return 0 + } + attrs := uint16(0) + if s.volumeIsReadOnly(vol.ID) { + attrs |= VolAttrReadOnly + } + volFS := s.fsForVolume(vol.ID) + if volFS != nil { + volumeRoot := filepath.Clean(vol.Config.Path) + if volFS.Capabilities().CatSearch { + if supported, err := volFS.SupportsCatSearch(volumeRoot); err == nil && supported { + attrs |= VolAttrSupportsCatSearch + } + } + } + return attrs +} + +func (s *Service) handleCloseVol(req *FPCloseVolReq) (*FPCloseVolRes, int32) { + netlog.Debug("[AFP] FPCloseVol for Volume ID %d", req.VolumeID) return &FPCloseVolRes{}, NoErr } -func (s *AFPService) handleOpenVol(req *FPOpenVolReq) (*FPOpenVolRes, int32) { +func (s *Service) handleOpenVol(req *FPOpenVolReq) (*FPOpenVolRes, int32) { // handleOpenVol implements the FPOpenVol operation. 
// // Algorithm (summary): Ensure the requested volume exists and the @@ -86,68 +174,15 @@ func (s *AFPService) handleOpenVol(req *FPOpenVolReq) (*FPOpenVolRes, int32) { if store, ok := s.cnidStore(targetVol.ID); ok { store.EnsureReserved(cleanRoot, CNIDRoot) } - volDate := s.volumeDate(targetVol) - bytesFree, bytesTotal := s.volumeCapacity(targetVol) - - fixedSize := calcVolParamsSize(req.Bitmap) - fixed := new(bytes.Buffer) - var varBuf bytes.Buffer - - s.mu.RLock() - backupDate := s.volumeBackupDate[targetVol.ID] - s.mu.RUnlock() - - if req.Bitmap&VolBitmapAttributes != 0 { - volAttrs := uint16(0) - if targetVol.Config.ReadOnly { - volAttrs |= VolAttrReadOnly - } - binary.Write(fixed, binary.BigEndian, volAttrs) - } - if req.Bitmap&VolBitmapSignature != 0 { - binary.Write(fixed, binary.BigEndian, s.volumeType(targetVol)) - } - if req.Bitmap&VolBitmapCreateDate != 0 { - binary.Write(fixed, binary.BigEndian, volDate) - } - if req.Bitmap&VolBitmapModDate != 0 { - binary.Write(fixed, binary.BigEndian, volDate) - } - if req.Bitmap&VolBitmapBackupDate != 0 { - binary.Write(fixed, binary.BigEndian, backupDate) - } - if req.Bitmap&VolBitmapVolID != 0 { - binary.Write(fixed, binary.BigEndian, targetVol.ID) - } - if req.Bitmap&VolBitmapBytesFree != 0 { - binary.Write(fixed, binary.BigEndian, capAFPBytes32(bytesFree)) - } - if req.Bitmap&VolBitmapBytesTotal != 0 { - binary.Write(fixed, binary.BigEndian, capAFPBytes32(bytesTotal)) - } - if req.Bitmap&VolBitmapName != 0 { - binary.Write(fixed, binary.BigEndian, uint16(fixedSize+varBuf.Len())) - s.writeAFPName(&varBuf, targetVol.Config.Name, targetVol.ID) - } - if req.Bitmap&VolBitmapExtBytesFree != 0 { - binary.Write(fixed, binary.BigEndian, bytesFree) - } - if req.Bitmap&VolBitmapExtBytesTotal != 0 { - binary.Write(fixed, binary.BigEndian, bytesTotal) - } - if req.Bitmap&VolBitmapBlockSize != 0 { - binary.Write(fixed, binary.BigEndian, uint32(4096)) - } res := &FPOpenVolRes{ Bitmap: req.Bitmap, - Data: append(fixed.Bytes(), 
varBuf.Bytes()...), + Data: s.packVolumeParams(targetVol, req.Bitmap), } - return res, NoErr } -func (s *AFPService) volumeRootByID(volumeID uint16) (string, bool) { +func (s *Service) volumeRootByID(volumeID uint16) (string, bool) { for i := range s.Volumes { if s.Volumes[i].ID == volumeID { return filepath.Clean(s.Volumes[i].Config.Path), true @@ -156,7 +191,7 @@ func (s *AFPService) volumeRootByID(volumeID uint16) (string, bool) { return "", false } -func (s *AFPService) volumeByID(volumeID uint16) (Volume, bool) { +func (s *Service) volumeByID(volumeID uint16) (Volume, bool) { for i := range s.Volumes { if s.Volumes[i].ID == volumeID { return s.Volumes[i], true @@ -165,28 +200,39 @@ func (s *AFPService) volumeByID(volumeID uint16) (Volume, bool) { return Volume{}, false } -func (s *AFPService) volumeIsReadOnly(volumeID uint16) bool { +func (s *Service) volumeIsReadOnly(volumeID uint16) bool { for i := range s.Volumes { if s.Volumes[i].ID == volumeID { - return s.Volumes[i].Config.ReadOnly + if s.Volumes[i].Config.ReadOnly { + return true + } + volFS := s.fsForVolume(volumeID) + if volFS != nil { + if volFS.Capabilities().ReadOnlyState { + if readonly, err := volFS.IsReadOnly(filepath.Clean(s.Volumes[i].Config.Path)); err == nil { + return readonly + } + } + } + return false } } return false } -func (s *AFPService) volumeDate(vol *Volume) uint32 { +func (s *Service) volumeDate(vol *Volume) uint32 { if vol == nil { return toAFPTime(time.Now()) } - if s.fs != nil { - if info, err := s.fs.Stat(filepath.Clean(vol.Config.Path)); err == nil && info != nil { + if volFS := s.fsForVolume(vol.ID); volFS != nil { + if info, err := volFS.Stat(filepath.Clean(vol.Config.Path)); err == nil && info != nil { return toAFPTime(info.ModTime()) } } return toAFPTime(time.Now()) } -func (s *AFPService) resolveVolumePath(volumeID uint16, dirID uint32, relPath string, pathType uint8) (string, int32) { +func (s *Service) resolveVolumePath(volumeID uint16, dirID uint32, relPath string, 
pathType uint8) (string, int32) { basePath, ok := s.getDIDPath(volumeID, dirID) if !ok { if dirID == 0 { @@ -212,7 +258,7 @@ func (s *AFPService) resolveVolumePath(volumeID uint16, dirID uint32, relPath st return full, NoErr } -func (s *AFPService) handleGetVolParms(req *FPGetVolParmsReq) (*FPGetVolParmsRes, int32) { +func (s *Service) handleGetVolParms(req *FPGetVolParmsReq) (*FPGetVolParmsRes, int32) { // handleGetVolParms implements the FPGetVolParms operation. // // Algorithm (summary): Verify the volume exists and that the @@ -239,66 +285,69 @@ func (s *AFPService) handleGetVolParms(req *FPGetVolParmsReq) (*FPGetVolParmsRes return &FPGetVolParmsRes{}, ErrBitmapErr } - fixedSize := calcVolParamsSize(req.Bitmap) + res := &FPGetVolParmsRes{ + Bitmap: req.Bitmap, + Data: s.packVolumeParams(targetVol, req.Bitmap), + } + return res, NoErr +} + +// packVolumeParams emits the AFP "volume parameters block" for vol per the +// caller-supplied bitmap (AFP 2.x §5.1.30). Variable-length fields (the +// volume name) are appended after the fixed section and referenced by an +// offset relative to the start of the parameters block. 
+func (s *Service) packVolumeParams(vol *Volume, bitmap uint16) []byte { + fixedSize := calcVolParamsSize(bitmap) fixed := new(bytes.Buffer) var varBuf bytes.Buffer - volDate := s.volumeDate(targetVol) - bytesFree, bytesTotal := s.volumeCapacity(targetVol) - s.mu.RLock() - backupDate := s.volumeBackupDate[req.VolumeID] - s.mu.RUnlock() + volDate := s.volumeDate(vol) + bytesFree, bytesTotal := s.volumeCapacity(vol) - if req.Bitmap&VolBitmapAttributes != 0 { - volAttrs := uint16(0) - if targetVol.Config.ReadOnly { - volAttrs |= VolAttrReadOnly - } - binary.Write(fixed, binary.BigEndian, volAttrs) + backupDate := s.backupDates.get(vol.ID) + + if bitmap&VolBitmapAttributes != 0 { + binutil.WriteU16(fixed, s.volumeAttributes(vol)) } - if req.Bitmap&VolBitmapSignature != 0 { - binary.Write(fixed, binary.BigEndian, s.volumeType(targetVol)) + if bitmap&VolBitmapSignature != 0 { + binutil.WriteU16(fixed, s.volumeType(vol)) } - if req.Bitmap&VolBitmapCreateDate != 0 { - binary.Write(fixed, binary.BigEndian, volDate) + if bitmap&VolBitmapCreateDate != 0 { + binutil.WriteU32(fixed, volDate) } - if req.Bitmap&VolBitmapModDate != 0 { - binary.Write(fixed, binary.BigEndian, volDate) + if bitmap&VolBitmapModDate != 0 { + binutil.WriteU32(fixed, volDate) } - if req.Bitmap&VolBitmapBackupDate != 0 { - binary.Write(fixed, binary.BigEndian, backupDate) + if bitmap&VolBitmapBackupDate != 0 { + binutil.WriteU32(fixed, backupDate) } - if req.Bitmap&VolBitmapVolID != 0 { - binary.Write(fixed, binary.BigEndian, targetVol.ID) + if bitmap&VolBitmapVolID != 0 { + binutil.WriteU16(fixed, vol.ID) } - if req.Bitmap&VolBitmapBytesFree != 0 { - binary.Write(fixed, binary.BigEndian, capAFPBytes32(bytesFree)) + if bitmap&VolBitmapBytesFree != 0 { + binutil.WriteU32(fixed, capAFPBytes32(bytesFree)) } - if req.Bitmap&VolBitmapBytesTotal != 0 { - binary.Write(fixed, binary.BigEndian, capAFPBytes32(bytesTotal)) + if bitmap&VolBitmapBytesTotal != 0 { + binutil.WriteU32(fixed, capAFPBytes32(bytesTotal)) } 
- if req.Bitmap&VolBitmapName != 0 { - binary.Write(fixed, binary.BigEndian, uint16(fixedSize+varBuf.Len())) - s.writeAFPName(&varBuf, targetVol.Config.Name, targetVol.ID) + if bitmap&VolBitmapName != 0 { + binutil.WriteU16(fixed, uint16(fixedSize+varBuf.Len())) + s.writeAFPName(&varBuf, vol.Config.Name, vol.ID) } - if req.Bitmap&VolBitmapExtBytesFree != 0 { - binary.Write(fixed, binary.BigEndian, bytesFree) + if bitmap&VolBitmapExtBytesFree != 0 { + binutil.WriteU64(fixed, bytesFree) } - if req.Bitmap&VolBitmapExtBytesTotal != 0 { - binary.Write(fixed, binary.BigEndian, bytesTotal) + if bitmap&VolBitmapExtBytesTotal != 0 { + binutil.WriteU64(fixed, bytesTotal) } - if req.Bitmap&VolBitmapBlockSize != 0 { - binary.Write(fixed, binary.BigEndian, uint32(4096)) + if bitmap&VolBitmapBlockSize != 0 { + binutil.WriteU32(fixed, 4096) } - res := &FPGetVolParmsRes{ - Bitmap: req.Bitmap, - Data: append(fixed.Bytes(), varBuf.Bytes()...), - } - return res, NoErr + return append(fixed.Bytes(), varBuf.Bytes()...) 
} -func (s *AFPService) handleSetVolParms(req *FPSetVolParmsReq) (*FPSetVolParmsRes, int32) { +func (s *Service) handleSetVolParms(req *FPSetVolParmsReq) (*FPSetVolParmsRes, int32) { if s.volumeIsReadOnly(req.VolumeID) { return &FPSetVolParmsRes{}, ErrVolLocked } @@ -317,23 +366,184 @@ func (s *AFPService) handleSetVolParms(req *FPSetVolParmsReq) (*FPSetVolParmsRes return &FPSetVolParmsRes{}, ErrParamErr } - s.mu.Lock() - s.volumeBackupDate[req.VolumeID] = req.BackupDate - s.mu.Unlock() + s.backupDates.set(req.VolumeID, req.BackupDate) return &FPSetVolParmsRes{}, NoErr } -func (s *AFPService) volumeCapacity(vol *Volume) (bytesFree uint64, bytesTotal uint64) { +func (s *Service) volumeCapacity(vol *Volume) (bytesFree uint64, bytesTotal uint64) { bytesFree = defaultAFPBytesFree bytesTotal = defaultAFPBytesTotal - if vol == nil || s.fs == nil { + if vol == nil { + return bytesFree, bytesTotal + } + volFS := s.fsForVolume(vol.ID) + if volFS == nil { return bytesFree, bytesTotal } - total, free, err := s.fs.DiskUsage(filepath.Clean(vol.Config.Path)) + total, free, err := volFS.DiskUsage(filepath.Clean(vol.Config.Path)) if err != nil { return bytesFree, bytesTotal } return free, total } + +// calcVolParamsSize returns the total byte size of all fixed fields +// (including the variable-name offset pointer) in a volume parameter +// block for the given bitmap. The variable-length name itself is +// emitted into a separate buffer and concatenated by the caller. 
+func calcVolParamsSize(bitmap uint16) int { + size := 0 + if bitmap&VolBitmapAttributes != 0 { + size += 2 + } + if bitmap&VolBitmapSignature != 0 { + size += 2 + } + if bitmap&VolBitmapCreateDate != 0 { + size += 4 + } + if bitmap&VolBitmapModDate != 0 { + size += 4 + } + if bitmap&VolBitmapBackupDate != 0 { + size += 4 + } + if bitmap&VolBitmapVolID != 0 { + size += 2 + } + if bitmap&VolBitmapBytesFree != 0 { + size += 4 + } + if bitmap&VolBitmapBytesTotal != 0 { + size += 4 + } + if bitmap&VolBitmapName != 0 { + size += 2 // offset pointer + } + if bitmap&VolBitmapExtBytesFree != 0 { + size += 8 + } + if bitmap&VolBitmapExtBytesTotal != 0 { + size += 8 + } + if bitmap&VolBitmapBlockSize != 0 { + size += 4 + } + return size +} + +// catalogNameForPath returns the configured volume name when fullPath +// is the volume root, otherwise fallbackName. AFP clients see the +// configured volume name (which may differ from the host directory +// basename) for the root entry in catalog listings. +func (s *Service) catalogNameForPath(volumeID uint16, fullPath, fallbackName string) string { + cleanPath := filepath.Clean(fullPath) + for i := range s.Volumes { + vol := s.Volumes[i] + if vol.ID != volumeID { + continue + } + if cleanPath == filepath.Clean(vol.Config.Path) && vol.Config.Name != "" { + return vol.Config.Name + } + break + } + return fallbackName +} + +// persistentVolumeIDForConfig derives a stable 16-bit volume ID +// from the volume's configured name and path so that clients see the +// same VolumeID across server restarts. Collisions within a single +// run are resolved by salting the CRC input. 
+func persistentVolumeIDForConfig(cfg VolumeConfig, used map[uint16]struct{}) uint16 { + nameKey := strings.ToLower(strings.TrimSpace(cfg.Name)) + pathKey := filepath.Clean(strings.TrimSpace(cfg.Path)) + + candidates := []string{ + nameKey, + nameKey + "|" + pathKey, + } + for _, key := range candidates { + id := crcVolumeID(key) + if _, exists := used[id]; exists { + continue + } + used[id] = struct{}{} + return id + } + + for salt := 1; ; salt++ { + id := crcVolumeID(fmt.Sprintf("%s|%s|%d", nameKey, pathKey, salt)) + if _, exists := used[id]; exists { + continue + } + used[id] = struct{}{} + return id + } +} + +func crcVolumeID(key string) uint16 { + id := uint16(crc32.ChecksumIEEE([]byte(key)) & 0xffff) + if id == 0 { + return 1 + } + return id +} + +// metaFor returns the ForkMetadataBackend for the given volume ID. +// If a per-volume backend is registered it is returned; otherwise the global +// injected backend (s.meta) is used. Returns nil when neither is available. +func (s *Service) metaFor(volID uint16) ForkMetadataBackend { + if s.metas != nil { + if m, ok := s.metas[volID]; ok { + return m + } + } + return s.meta +} + +// metaForPath returns the ForkMetadataBackend for the volume whose root path +// is a prefix of path. Falls back to the global injected backend when no +// matching volume is found. +func (s *Service) metaForPath(path string) ForkMetadataBackend { + clean := filepath.Clean(path) + for _, vol := range s.Volumes { + rel, err := filepath.Rel(vol.Config.Path, clean) + if err == nil && !strings.HasPrefix(rel, "..") { + return s.metaFor(vol.ID) + } + } + return s.meta +} + +func (s *Service) fsForVolume(volID uint16) FileSystem { + if fs, ok := s.volumeFS[volID]; ok && fs != nil { + return fs + } + return s.fs +} + +func (s *Service) fsForPath(path string) FileSystem { + clean := filepath.Clean(path) + for _, vol := range s.Volumes { + rel, err := filepath.Rel(filepath.Clean(vol.Config.Path), clean) + if err == nil && rel != ".." 
&& !strings.HasPrefix(rel, ".."+string(filepath.Separator)) { + if fs := s.fsForVolume(vol.ID); fs != nil { + return fs + } + } + } + return s.fs +} + +func newBackendForVolumeConfig(cfg VolumeConfig) (FileSystem, error) { + fsType, err := NormalizeFSType(cfg.FSType) + if err != nil { + return nil, err + } + cfg.FSType = fsType + cfg.Path = filepath.Clean(cfg.Path) + return NewFS(cfg) +} diff --git a/service/afp/volume_models.go b/service/afp/volume_models.go index 36696f6..c558945 100644 --- a/service/afp/volume_models.go +++ b/service/afp/volume_models.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -5,6 +7,8 @@ import ( "encoding/binary" "fmt" "strings" + + "github.com/pgodw/omnitalk/pkg/binutil" ) func formatVolBitmap(bitmap uint16) string { @@ -115,11 +119,26 @@ func (res *FPOpenVolRes) String() string { return fmt.Sprintf("FPOpenVolRes{Bitmap: %s, DataLen: %d}", formatVolBitmap(res.Bitmap), len(res.Data)) } +func (res *FPOpenVolRes) WireSize() int { return 2 + len(res.Data) } + +func (res *FPOpenVolRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPOpenVolRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } type FPCloseVolReq struct { @@ -184,11 +203,26 @@ func (res *FPGetVolParmsRes) String() string { return fmt.Sprintf("FPGetVolParmsRes{Bitmap: %s, DataLen: %d}", formatVolBitmap(res.Bitmap), len(res.Data)) } +func (res *FPGetVolParmsRes) WireSize() int { return 2 + len(res.Data) } + +func (res *FPGetVolParmsRes) MarshalWire(b []byte) (int, error) { + off := 0 + n, err := binutil.PutU16(b[off:], res.Bitmap) + if err != nil { + return 
0, err + } + off += n + if len(b[off:]) < len(res.Data) { + return 0, binutil.ErrShortBuffer + } + off += copy(b[off:], res.Data) + return off, nil +} + func (res *FPGetVolParmsRes) Marshal() []byte { - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, res.Bitmap) - buf.Write(res.Data) - return buf.Bytes() + b := make([]byte, res.WireSize()) + _, _ = res.MarshalWire(b) + return b } // FPSetVolParms - set volume parameters (AFP 2.x section 5.1.32) diff --git a/service/afp/volume_models_golden_test.go b/service/afp/volume_models_golden_test.go new file mode 100644 index 0000000..ced6a7b --- /dev/null +++ b/service/afp/volume_models_golden_test.go @@ -0,0 +1,36 @@ +//go:build afp || all + +package afp + +import ( + "bytes" + "testing" +) + +// TestFPOpenVolRes_MarshalGolden pins the wire-format output of FPOpenVolRes.Marshal. +func TestFPOpenVolRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPOpenVolRes{ + Bitmap: 0x1234, + Data: []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, + } + got := res.Marshal() + want := goldenBytes(t, "fpopenvolres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} + +// TestFPGetVolParmsRes_MarshalGolden pins the wire-format output of FPGetVolParmsRes.Marshal. 
+func TestFPGetVolParmsRes_MarshalGolden(t *testing.T) { + t.Parallel() + res := &FPGetVolParmsRes{ + Bitmap: 0xBEEF, + Data: []byte("volparms-payload"), + } + got := res.Marshal() + want := goldenBytes(t, "fpgetvolparmsres_basic.hex", got) + if !bytes.Equal(got, want) { + t.Fatalf("Marshal output drift:\n got: %x\n want: %x", got, want) + } +} diff --git a/service/afp/volume_signature_test.go b/service/afp/volume_signature_test.go index 86bf2e2..63ec1ab 100644 --- a/service/afp/volume_signature_test.go +++ b/service/afp/volume_signature_test.go @@ -1,3 +1,5 @@ +//go:build afp || all + package afp import ( @@ -28,7 +30,7 @@ func TestConstrainAFPVolumeType(t *testing.T) { func TestAFP_OpenVol_UsesFixedDirIDVolumeType(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) res, errCode := s.handleOpenVol(&FPOpenVolReq{ Bitmap: VolBitmapSignature | VolBitmapVolID, @@ -49,7 +51,7 @@ func TestAFP_OpenVol_UsesFixedDirIDVolumeType(t *testing.T) { func TestAFP_GetVolParms_UsesFixedDirIDVolumeType(t *testing.T) { root := t.TempDir() - s := NewAFPService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) + s := NewService("TestServer", []VolumeConfig{{Name: "Vol", Path: root}}, &LocalFileSystem{}, nil) res, errCode := s.handleGetVolParms(&FPGetVolParmsReq{ VolumeID: 1, diff --git a/service/afp/volume_state.go b/service/afp/volume_state.go new file mode 100644 index 0000000..35c7db1 --- /dev/null +++ b/service/afp/volume_state.go @@ -0,0 +1,39 @@ +//go:build afp || all + +package afp + +import "sync" + +// backupDates holds FPSetVolParms-supplied backup dates per volume. AFP 2.x +// §5.1.32 lets clients write a 32-bit "backup date" against a volume; we +// remember it so subsequent FPGetVolParms returns the same value. 
+// +// This is the only volume-related field that mutates after Service.Start. +// The Volumes slice and the volumeFS / metas / cnidStores maps are +// populated once during installVolumes and read-only thereafter, so they +// need no synchronisation. backupDates carries its own mutex so the +// FPSetVolParms write path no longer contends with fork, desktop, or auth +// traffic. +type backupDates struct { + mu sync.RWMutex + m map[uint16]uint32 +} + +func newBackupDates() backupDates { + return backupDates{m: make(map[uint16]uint32)} +} + +// get returns the recorded backup date for volID, or zero when none has +// been set. +func (b *backupDates) get(volID uint16) uint32 { + b.mu.RLock() + defer b.mu.RUnlock() + return b.m[volID] +} + +// set records when as the backup date for volID. +func (b *backupDates) set(volID uint16, when uint32) { + b.mu.Lock() + defer b.mu.Unlock() + b.m[volID] = when +} diff --git a/service/afpfs/macgarden/fs.go b/service/afpfs/macgarden/fs.go new file mode 100644 index 0000000..42bd234 --- /dev/null +++ b/service/afpfs/macgarden/fs.go @@ -0,0 +1,1814 @@ +//go:build (afp && macgarden) || all + +// Package macgarden implements an AFP FileSystem backend that exposes +// macintoshgarden.org as a read-only volume tree (Apps/, Games/, +// search/). It plugs into the AFP FileSystem registry under the +// "macgarden" type and is gated behind the `macgarden` build tag. +// +// Lives in service/afpfs/ alongside future AFP filesystem backends so +// the core AFP package never imports any specific filesystem. 
+package macgarden + +import ( + "errors" + "fmt" + "io" + "io/fs" + "maps" + "net/url" + "os" + "path/filepath" + "slices" + "sort" + "strings" + "sync" + "time" + "unicode" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/service/afp" + garden "github.com/pgodw/omnitalk/service/macgarden" +) + +const macGardenEnumerateWindow = 10 +const macGardenSearchPageSize = 20 + +type macGardenFileInfo struct { + name string + size int64 + mode fs.FileMode + modTime time.Time + isDir bool +} + +func (i *macGardenFileInfo) Name() string { return i.name } +func (i *macGardenFileInfo) Size() int64 { return i.size } +func (i *macGardenFileInfo) Mode() fs.FileMode { return i.mode } +func (i *macGardenFileInfo) ModTime() time.Time { return i.modTime } +func (i *macGardenFileInfo) IsDir() bool { return i.isDir } +func (i *macGardenFileInfo) Sys() any { return nil } + +type macGardenDirEntry struct{ info fs.FileInfo } + +func (d macGardenDirEntry) Name() string { return d.info.Name() } +func (d macGardenDirEntry) IsDir() bool { return d.info.IsDir() } +func (d macGardenDirEntry) Type() fs.FileMode { return d.info.Mode().Type() } +func (d macGardenDirEntry) Info() (fs.FileInfo, error) { return d.info, nil } + +type macGardenCachedResult struct { + Name string + URL string +} + +type macGardenAsset struct { + Name string + URL string + Size int64 + Content []byte +} + +type macGardenCategoryPageMeta struct { + TotalCount uint16 + PageSize int + LastPageNumber int + LastPageCount int +} + +type macGardenFile struct { + asset macGardenAsset + client *garden.Client +} + +func (f *macGardenFile) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, fs.ErrInvalid + } + if len(f.asset.Content) > 0 { + if off >= int64(len(f.asset.Content)) { + return 0, io.EOF + } + n = copy(p, f.asset.Content[off:]) + if n < len(p) { + return n, io.EOF + } + return n, nil + } + // ReadURLRange applies the client's maxRangeSize cap internally, so it may + // return 
fewer bytes than len(p). Signal io.EOF only when the HTTP response + // is shorter than the bytes we actually requested — meaning we hit real EOF, + // not just the range cap. FPRead buffers are already bounded by the same cap + // (via handleRead.maxReadSize), so for that path len(data)==len(p) always. + // FPCopyFile re-reads in a loop, so getting n 0 && requested > max { + requested = max + } + data, readErr := f.client.ReadURLRange(f.asset.URL, off, len(p)) + if readErr != nil { + return 0, fmt.Errorf("%w: %v", afp.ErrCopySourceReadEOF, readErr) + } + n = copy(p, data) + if len(data) < requested { + return n, io.EOF + } + return n, nil +} + +func (f *macGardenFile) WriteAt(_ []byte, _ int64) (n int, err error) { return 0, fs.ErrPermission } +func (f *macGardenFile) Truncate(_ int64) error { return fs.ErrPermission } +func (f *macGardenFile) Close() error { return nil } +func (f *macGardenFile) Sync() error { return nil } +func (f *macGardenFile) Stat() (fs.FileInfo, error) { + size := f.asset.Size + if size == 0 && f.asset.URL != "" { + if s, err := f.client.GetContentLength(f.asset.URL); err == nil { + size = s + } + } + return &macGardenFileInfo{name: filepath.Base(f.asset.Name), size: size, mode: 0o444, modTime: time.Now().UTC()}, nil +} + +// fetchAndCacheScreenshot downloads a screenshot URL and stores it in the +// in-memory cache. Subsequent OpenFile calls serve from cache without network I/O. +func (m *MacGardenFileSystem) fetchAndCacheScreenshot(url string) ([]byte, error) { + m.screenshotMu.RLock() + if data, ok := m.screenshotCache[url]; ok { + m.screenshotMu.RUnlock() + return data, nil + } + m.screenshotMu.RUnlock() + data, err := m.client.FetchFull(url) + if err != nil { + return nil, err + } + m.screenshotMu.Lock() + m.screenshotCache[url] = data + m.screenshotMu.Unlock() + return data, nil +} + +// resolveAssetSize returns the known size, or triggers a size fetch appropriate +// for the asset type. 
Called during FPGetFileDirParms so Finder sees the real size. +// Screenshots: full download cached in memory (avoids HEAD which gets blocked). +// Downloads: ranged GET to read the Content-Range total only. +func (m *MacGardenFileSystem) resolveAssetSize(a macGardenAsset) int64 { + if a.Size > 0 || a.URL == "" { + return a.Size + } + if strings.HasPrefix(a.Name, "Screenshots/") { + if data, err := m.fetchAndCacheScreenshot(a.URL); err == nil { + return int64(len(data)) + } + return 0 + } + if s, err := m.client.GetContentLength(a.URL); err == nil { + return s + } + return 0 +} + +// MacGardenFileSystem is a read-only virtual filesystem backed by macintoshgarden.org. +type macGardenSearchCache struct { + pages map[int][]garden.SearchResult // pageNumber -> results + exhausted bool // true when all pages have been fetched +} + +type MacGardenFileSystem struct { + root string + client *garden.Client + + mu sync.RWMutex + categories []garden.Category + searchByName map[string]macGardenCachedResult + itemURLByDir map[string]string + itemByURL map[string]*garden.SoftwareItem + itemsInCategory map[string][]garden.SearchResult // categoryURL -> items + categoryItemCount map[string]uint16 + categoryPageMeta map[string]macGardenCategoryPageMeta + categoryPageItems map[string]map[int][]garden.SearchResult + downloadByPath map[string]macGardenAsset + screenshotByPath map[string]macGardenAsset + descriptionByPath map[string]macGardenAsset + catSearchCache map[string]*macGardenSearchCache // normalized query -> cached results + + screenshotMu sync.RWMutex + screenshotCache map[string][]byte // URL -> full image bytes + + stop chan struct{} + stopOnce sync.Once + wg sync.WaitGroup +} + +func init() { + afp.RegisterFS(afp.FSTypeMacGarden, func(cfg afp.VolumeConfig) (afp.FileSystem, error) { + return NewMacGardenFileSystem(filepath.Clean(cfg.Path)), nil + }) +} + +func NewMacGardenFileSystem(root string) *MacGardenFileSystem { + gc := garden.NewClient() + gc.Prime() + fsys := 
&MacGardenFileSystem{ + root: filepath.Clean(root), + client: gc, + searchByName: make(map[string]macGardenCachedResult), + itemURLByDir: make(map[string]string), + itemByURL: make(map[string]*garden.SoftwareItem), + itemsInCategory: make(map[string][]garden.SearchResult), + categoryItemCount: make(map[string]uint16), + categoryPageMeta: make(map[string]macGardenCategoryPageMeta), + categoryPageItems: make(map[string]map[int][]garden.SearchResult), + downloadByPath: make(map[string]macGardenAsset), + screenshotByPath: make(map[string]macGardenAsset), + descriptionByPath: make(map[string]macGardenAsset), + catSearchCache: make(map[string]*macGardenSearchCache), + screenshotCache: make(map[string][]byte), + stop: make(chan struct{}), + } + fsys.loadCategories() + return fsys +} + +func (m *MacGardenFileSystem) loadCategories() { + m.mu.RLock() + if len(m.categories) > 0 { + m.mu.RUnlock() + return + } + m.mu.RUnlock() + cats, err := m.client.GetCategories() + if err != nil { + netlog.Warn("[AFP][MacGarden] failed to fetch categories: %v", err) + return + } + sort.Slice(cats, func(i, j int) bool { return strings.ToLower(cats[i].Name) < strings.ToLower(cats[j].Name) }) + m.mu.Lock() + if len(m.categories) == 0 { + m.categories = cats + } + m.mu.Unlock() + if len(cats) == 0 { + netlog.Warn("[AFP][MacGarden] category fetch succeeded but returned no categories") + } +} + +func (m *MacGardenFileSystem) normalize(path string) (string, error) { + clean := filepath.Clean(path) + rel, err := filepath.Rel(m.root, clean) + if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) { + return "", fs.ErrPermission + } + if rel == "." { + return "", nil + } + return filepath.ToSlash(rel), nil +} + +// readDirCore resolves a normalized relative path to directory entries. It is +// the shared implementation used by both ReadDir and ReadDirRange. Callers are +// responsible for running it in a goroutine if a timeout is needed. 
+func (m *MacGardenFileSystem) readDirCore(rel string) ([]fs.DirEntry, error) { + if rel == "" { + netlog.Debug("[AFP][MacGarden] ReadDir root") + return []fs.DirEntry{ + macGardenDirEntry{info: &macGardenFileInfo{name: "Apps", mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}, + macGardenDirEntry{info: &macGardenFileInfo{name: "Games", mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}, + macGardenDirEntry{info: &macGardenFileInfo{name: "search", mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}, + }, nil + } + + parts := strings.Split(rel, "/") + + // Apps or Games level: show categories for that type. + if len(parts) == 1 && (parts[0] == "Apps" || parts[0] == "Games") { + netlog.Debug("[AFP][MacGarden] ReadDir %s", parts[0]) + m.loadCategories() + catType := parts[0] + urlPrefix := "/apps/" + if catType == "Games" { + urlPrefix = "/games/" + } + m.mu.RLock() + defer m.mu.RUnlock() + entries := make([]fs.DirEntry, 0, len(m.categories)) + for _, cat := range m.categories { + if strings.HasPrefix(strings.ToLower(urlPathFromAbsolute(cat.URL)), urlPrefix) { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: cat.Name, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + } + netlog.Info("[AFP][MacGarden] ReadDir %s returning %d entries", catType, len(entries)) + return entries, nil + } + + // /search — list all cached search queries as subdirectories. + if len(parts) == 1 && parts[0] == "search" { + m.mu.RLock() + queries := slices.Sorted(maps.Keys(m.catSearchCache)) + m.mu.RUnlock() + entries := make([]fs.DirEntry, 0, len(queries)) + for _, q := range queries { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: q, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + return entries, nil + } + + // /search/ — list type subdirectories (App, Game) plus untyped items. 
+ if len(parts) == 2 && parts[0] == "search" { + m.mu.RLock() + cache, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + pageNums := slices.Sorted(maps.Keys(cache.pages)) + typesSeen := map[string]struct{}{} + untypedSeen := map[string]struct{}{} + var typeNames, untypedNames []string + for _, pn := range pageNums { + for _, r := range cache.pages[pn] { + if r.Type != "" { + if _, exists := typesSeen[r.Type]; !exists { + typesSeen[r.Type] = struct{}{} + typeNames = append(typeNames, r.Type) + } + } else { + if name := sanitizeGardenName(r.Name); name != "" { + if _, exists := untypedSeen[name]; !exists { + untypedSeen[name] = struct{}{} + untypedNames = append(untypedNames, name) + } + } + } + } + } + sort.Strings(typeNames) + sort.Strings(untypedNames) + entries := make([]fs.DirEntry, 0, len(typeNames)+len(untypedNames)) + for _, name := range typeNames { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: name, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + for _, name := range untypedNames { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: name, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + return entries, nil + } + + // /search// — virtual type subdirectory (App/Game). 
+ if len(parts) == 3 && parts[0] == "search" && isSearchResultType(parts[2]) { + m.mu.RLock() + cache, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + resultType := parts[2] + var names []string + for _, page := range cache.pages { + for _, r := range page { + if r.Type == resultType { + if name := sanitizeGardenName(r.Name); name != "" { + names = append(names, name) + } + } + } + } + sort.Strings(names) + entries := make([]fs.DirEntry, 0, len(names)) + for _, name := range names { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: name, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + return entries, nil + } + + // /search// — assets for that item. + if len(parts) == 3 && parts[0] == "search" { + itemName := parts[2] + m.mu.RLock() + search, ok := m.searchByName[itemName] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, search.URL); err != nil { + return nil, err + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + return buildItemDirEntries(assets, ""), nil + } + + // /search///[/] — typed item or its subdirectory. + if len(parts) >= 4 && parts[0] == "search" && isSearchResultType(parts[2]) { + itemName := parts[3] + subPath := filepath.ToSlash(filepath.Join(parts[4:]...)) + m.mu.RLock() + search, ok := m.searchByName[itemName] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, search.URL); err != nil { + return nil, err + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + return buildItemDirEntries(assets, subPath), nil + } + + // /search/// — subdirectory within an item. 
+ if len(parts) >= 4 && parts[0] == "search" { + itemName := parts[2] + subPath := filepath.ToSlash(filepath.Join(parts[3:]...)) + m.mu.RLock() + search, ok := m.searchByName[itemName] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, search.URL); err != nil { + return nil, err + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + return buildItemDirEntries(assets, subPath), nil + } + + // Apps/Games/CategoryName/ItemName — assets for a software item + if len(parts) == 3 && (parts[0] == "Apps" || parts[0] == "Games") { + catName, itemName := parts[1], parts[2] + catURL := m.getCategoryURL(catName) + if catURL == "" { + return nil, fs.ErrNotExist + } + itemURL, err := m.getItemURLInCategory(catURL, itemName) + if err != nil { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, itemURL); err != nil { + return nil, err + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + return buildItemDirEntries(assets, ""), nil + } + + // Apps/Games/CategoryName/ItemName/SubDir... 
— subdirectory within an item + if len(parts) >= 4 && (parts[0] == "Apps" || parts[0] == "Games") { + catName, itemName := parts[1], parts[2] + subPath := filepath.ToSlash(filepath.Join(parts[3:]...)) + catURL := m.getCategoryURL(catName) + if catURL == "" { + return nil, fs.ErrNotExist + } + itemURL, err := m.getItemURLInCategory(catURL, itemName) + if err != nil { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, itemURL); err != nil { + return nil, err + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + return buildItemDirEntries(assets, subPath), nil + } + + return nil, fs.ErrNotExist +} + +func (m *MacGardenFileSystem) ReadDir(path string) ([]fs.DirEntry, error) { + rel, err := m.normalize(path) + if err != nil { + return nil, err + } + return m.readDirCore(rel) +} + +func (m *MacGardenFileSystem) ReadDirRange(path string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + if reqCount == 0 { + return nil, 0, nil + } + rel, err := m.normalize(path) + if err != nil { + return nil, 0, err + } + parts := strings.Split(rel, "/") + if len(parts) == 1 && (parts[0] == "Apps" || parts[0] == "Games") { + m.loadCategories() + prefix := "/apps/" + if parts[0] == "Games" { + prefix = "/games/" + } + m.mu.RLock() + filtered := make([]fs.DirEntry, 0, len(m.categories)) + for _, cat := range m.categories { + if strings.HasPrefix(strings.ToLower(urlPathFromAbsolute(cat.URL)), prefix) { + filtered = append(filtered, macGardenDirEntry{info: &macGardenFileInfo{name: cat.Name, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + } + m.mu.RUnlock() + total := uint16(len(filtered)) + if startIndex < 1 { + startIndex = 1 + } + if int(startIndex) > len(filtered) { + return nil, total, nil + } + start := int(startIndex) - 1 + end := start + int(reqCount) + if end > len(filtered) { + end = len(filtered) + } + return append([]fs.DirEntry(nil), filtered[start:end]...), total, nil + } + 
if len(parts) == 2 && (parts[0] == "Apps" || parts[0] == "Games") { + catURL := m.getCategoryURL(parts[1]) + if catURL == "" { + return nil, 0, fs.ErrNotExist + } + return m.readCategoryDirRange(catURL, startIndex, reqCount) + } + entries, err := m.readDirCore(rel) + if err != nil { + return nil, 0, err + } + total := uint16(len(entries)) + if startIndex < 1 { + startIndex = 1 + } + if int(startIndex) > len(entries) { + return nil, total, nil + } + start := int(startIndex) - 1 + end := start + int(reqCount) + if end > len(entries) { + end = len(entries) + } + return append([]fs.DirEntry(nil), entries[start:end]...), total, nil +} + +func (m *MacGardenFileSystem) Stat(path string) (fs.FileInfo, error) { + rel, err := m.normalize(path) + if err != nil { + return nil, err + } + if rel == "" { + return &macGardenFileInfo{name: filepath.Base(m.root), mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + + parts := strings.Split(rel, "/") + + // Apps or Games level + if len(parts) == 1 && (parts[0] == "Apps" || parts[0] == "Games") { + return &macGardenFileInfo{name: parts[0], mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + + // /search virtual directory + if len(parts) == 1 && parts[0] == "search" { + return &macGardenFileInfo{name: "search", mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + + // /search/ + if len(parts) == 2 && parts[0] == "search" { + m.mu.RLock() + _, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if ok { + return &macGardenFileInfo{name: parts[1], mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + return nil, fs.ErrNotExist + } + + // /search// — virtual type subdirectory (App/Game) + // /search// — item directory + if len(parts) == 3 && parts[0] == "search" { + if isSearchResultType(parts[2]) { + return &macGardenFileInfo{name: parts[2], mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + itemName := parts[2] + 
m.mu.RLock() + cache, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + for _, page := range cache.pages { + for _, r := range page { + if sanitizeGardenName(r.Name) == itemName { + return &macGardenFileInfo{name: itemName, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + } + } + return nil, fs.ErrNotExist + } + + // /search///[/] or /search/// + if len(parts) >= 4 && parts[0] == "search" { + var itemName, fileName string + if isSearchResultType(parts[2]) { + itemName = parts[3] + fileName = strings.Join(parts[4:], "/") + } else { + itemName = parts[2] + fileName = strings.Join(parts[3:], "/") + } + if fileName == "" { + // It's the item directory itself under a type subdirectory + return &macGardenFileInfo{name: itemName, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + m.mu.RLock() + search, ok := m.searchByName[itemName] + loaded := false + if ok { + _, loaded = m.itemByURL[search.URL] + } + m.mu.RUnlock() + if !ok || !loaded { + return nil, fs.ErrNotExist + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + for _, a := range assets { + if a.Name == fileName { + return &macGardenFileInfo{name: filepath.Base(a.Name), size: m.resolveAssetSize(a), mode: 0o444, modTime: time.Now().UTC()}, nil + } + } + prefix := fileName + "/" + for _, a := range assets { + if strings.HasPrefix(a.Name, prefix) { + return &macGardenFileInfo{name: filepath.Base(fileName), mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + } + return nil, fs.ErrNotExist + } + + // Search-hit item directory at root level (legacy, retained for compatibility). 
+ if len(parts) == 1 { + m.mu.RLock() + _, ok := m.searchByName[parts[0]] + m.mu.RUnlock() + if ok { + return &macGardenFileInfo{name: parts[0], mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + } + + // Category level - return immediately without fetching items + // Stat should be lightweight; items are fetched lazily only on ReadDir + if len(parts) == 2 && (parts[0] == "Apps" || parts[0] == "Games") { + catName := parts[1] + catURL := m.getCategoryURL(catName) + if catURL != "" { + netlog.Debug("[AFP][MacGarden] Stat returning category (no lazy fetch): %s", catName) + return &macGardenFileInfo{name: catName, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + return nil, fs.ErrNotExist + } + + // Item level - return immediately without fetching items + if len(parts) == 3 && (parts[0] == "Apps" || parts[0] == "Games") { + itemName := parts[2] + // Don't fetch the item here; just return dir info + // Real items are fetched lazily when ReadDir is called + netlog.Debug("[AFP][MacGarden] Stat returning item (no lazy fetch): %s", itemName) + return &macGardenFileInfo{name: itemName, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + + // macOS probes certain well-known system paths on every directory it visits. + // Reject them quickly so we never trigger network fetches for them. 
+ macSystemNames := map[string]bool{ + "Configuration": true, + "Network Trash Folder": true, + "TheVolumeSettingsFolder": true, + "Temporary Items": true, + ".DS_Store": true, + "Icon\r": true, + } + if len(parts) >= 3 && macSystemNames[parts[len(parts)-1]] { + return nil, fs.ErrNotExist + } + + // Asset level (file) + if len(parts) >= 4 && (parts[0] == "Apps" || parts[0] == "Games") { + catName := parts[1] + itemName := parts[2] + fileName := strings.Join(parts[3:], "/") + + catURL := m.getCategoryURL(catName) + if catURL == "" { + return nil, fs.ErrNotExist + } + + itemURL, err := m.getItemURLInCategory(catURL, itemName) + if err != nil { + return nil, fs.ErrNotExist + } + + // Keep Stat lazy for item children: if the item has not been opened yet, + // do not fetch details just to probe a potential child path. + m.mu.RLock() + _, loaded := m.itemByURL[itemURL] + m.mu.RUnlock() + if !loaded { + return nil, fs.ErrNotExist + } + + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + + for _, a := range assets { + if a.Name == fileName { + return &macGardenFileInfo{name: filepath.Base(a.Name), size: m.resolveAssetSize(a), mode: 0o444, modTime: time.Now().UTC()}, nil + } + } + prefix := fileName + "/" + for _, a := range assets { + if strings.HasPrefix(a.Name, prefix) { + return &macGardenFileInfo{name: filepath.Base(fileName), mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}, nil + } + } + } + + // Asset-level file under root search-hit item dir: ItemName/Asset + if len(parts) >= 2 && parts[0] != "Apps" && parts[0] != "Games" { + itemName := parts[0] + fileName := filepath.Join(parts[1:]...) 
+ m.mu.RLock() + search, ok := m.searchByName[itemName] + loaded := false + if ok { + _, loaded = m.itemByURL[search.URL] + } + m.mu.RUnlock() + if !ok || !loaded { + return nil, fs.ErrNotExist + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + for _, a := range assets { + if a.Name == fileName { + return &macGardenFileInfo{name: a.Name, size: a.Size, mode: 0o444, modTime: time.Now().UTC()}, nil + } + } + } + + return nil, fs.ErrNotExist +} + +func (m *MacGardenFileSystem) DiskUsage(_ string) (totalBytes uint64, freeBytes uint64, err error) { + return 0x20000000, 0x18000000, nil +} + +func (m *MacGardenFileSystem) ChildCount(path string) (uint16, error) { + rel, err := m.normalize(path) + if err != nil { + return 0, err + } + if rel == "" { + return 3, nil // Apps + Games + search + } + + m.loadCategories() + parts := strings.Split(rel, "/") + if len(parts) == 1 { + switch parts[0] { + case "Apps": + return m.countCategoriesWithPrefix("/apps/"), nil + case "Games": + return m.countCategoriesWithPrefix("/games/"), nil + } + } + if len(parts) == 2 && (parts[0] == "Apps" || parts[0] == "Games") { + catURL := m.getCategoryURL(parts[1]) + if catURL == "" { + return 0, nil + } + m.mu.RLock() + if count, ok := m.categoryItemCount[catURL]; ok { + m.mu.RUnlock() + return count, nil + } + m.mu.RUnlock() + // Category counts must remain fully lazy. Until a category has actually + // been opened and its items fetched, report an unknown count as zero + // rather than triggering remote requests during parent directory enumerate. 
+ return 0, nil + } + if len(parts) == 3 && (parts[0] == "Apps" || parts[0] == "Games") { + itemName := parts[2] + m.mu.RLock() + itemURL := m.itemURLByDir[itemName] + item := m.itemByURL[itemURL] + m.mu.RUnlock() + if item == nil { + return 0, nil + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return 0, nil + } + return uint16(len(buildItemDirEntries(assets, ""))), nil + } + if len(parts) >= 4 && (parts[0] == "Apps" || parts[0] == "Games") { + itemName := parts[2] + subPath := strings.Join(parts[3:], "/") + m.mu.RLock() + itemURL := m.itemURLByDir[itemName] + item := m.itemByURL[itemURL] + m.mu.RUnlock() + if item == nil { + return 0, nil + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return 0, nil + } + return uint16(len(buildItemDirEntries(assets, subPath))), nil + } + if len(parts) >= 1 && parts[0] == "search" { + switch len(parts) { + case 1: + // /search — number of cached queries. + m.mu.RLock() + n := uint16(len(m.catSearchCache)) + m.mu.RUnlock() + return n, nil + case 2: + // /search/ — count distinct type dirs + untyped items. + m.mu.RLock() + cache, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if !ok { + return 0, nil + } + typesSeen := map[string]struct{}{} + untypedSeen := map[string]struct{}{} + for _, page := range cache.pages { + for _, r := range page { + if r.Type != "" { + typesSeen[r.Type] = struct{}{} + } else if name := sanitizeGardenName(r.Name); name != "" { + untypedSeen[name] = struct{}{} + } + } + } + return clampGardenCount(len(typesSeen) + len(untypedSeen)), nil + case 3: + // /search// — count items of that type. 
+ if isSearchResultType(parts[2]) { + m.mu.RLock() + cache, ok := m.catSearchCache[parts[1]] + m.mu.RUnlock() + if !ok { + return 0, nil + } + seen := map[string]struct{}{} + for _, page := range cache.pages { + for _, r := range page { + if r.Type == parts[2] { + if name := sanitizeGardenName(r.Name); name != "" { + seen[name] = struct{}{} + } + } + } + } + return clampGardenCount(len(seen)), nil + } + // /search// — offspring count for item root. + itemName := parts[2] + m.mu.RLock() + itemURL := m.itemURLByDir[itemName] + item := m.itemByURL[itemURL] + m.mu.RUnlock() + if item == nil { + return 0, nil + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return 0, nil + } + return uint16(len(buildItemDirEntries(assets, ""))), nil + default: + // /search///[/] or /search/// + var itemName, subPath string + if isSearchResultType(parts[2]) { + itemName = parts[3] + subPath = strings.Join(parts[4:], "/") + } else { + itemName = parts[2] + subPath = strings.Join(parts[3:], "/") + } + m.mu.RLock() + itemURL := m.itemURLByDir[itemName] + item := m.itemByURL[itemURL] + m.mu.RUnlock() + if item == nil { + return 0, nil + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return 0, nil + } + return uint16(len(buildItemDirEntries(assets, subPath))), nil + } + } + if len(parts) == 1 { + return 0, nil + } + return 0, &afp.NotSupportedError{Operation: "ChildCount"} +} + +// DirAttributes returns AFP directory attribute bits for a path. +// /search is flagged invisible so it stays hidden from normal Finder browsing. +func (m *MacGardenFileSystem) DirAttributes(path string) (uint16, error) { + rel, err := m.normalize(path) + if err != nil { + return 0, err + } + if rel == "search" { + return afp.DirAttrInvisible, nil + } + return 0, nil +} + +func (m *MacGardenFileSystem) IsReadOnly(_ string) (bool, error) { + return true, nil +} + +// SetMaxRangeSize limits each HTTP range request to at most n bytes. 
+// Called by the AFP service with the ASP quantum size so that reads from +// macintoshgarden.org never exceed what can fit in one ASP reply. +func (m *MacGardenFileSystem) SetMaxRangeSize(n int) { + m.client.SetMaxRangeSize(n) +} + +func (m *MacGardenFileSystem) SupportsCatSearch(_ string) (bool, error) { + return true, nil +} + +func (m *MacGardenFileSystem) Capabilities() afp.FileSystemCapabilities { + return afp.FileSystemCapabilities{ + CatSearch: true, + ChildCount: true, + ReadDirRange: true, + DirAttributes: true, + ReadOnlyState: true, + } +} + +func (m *MacGardenFileSystem) Close() error { + m.stopOnce.Do(func() { close(m.stop) }) + m.wg.Wait() + return nil +} + +func (m *MacGardenFileSystem) CreateDir(_ string) error { return fs.ErrPermission } +func (m *MacGardenFileSystem) CreateFile(_ string) (afp.File, error) { return nil, fs.ErrPermission } +func (m *MacGardenFileSystem) Remove(_ string) error { return fs.ErrPermission } +func (m *MacGardenFileSystem) Rename(_, _ string) error { return fs.ErrPermission } + +// openAsset wraps an asset in a macGardenFile, populating Content from the +// in-memory screenshot cache when the image has already been downloaded. 
+func (m *MacGardenFileSystem) openAsset(a macGardenAsset) *macGardenFile { + if strings.HasPrefix(a.Name, "Screenshots/") && a.URL != "" && len(a.Content) == 0 { + m.screenshotMu.RLock() + data, ok := m.screenshotCache[a.URL] + m.screenshotMu.RUnlock() + if ok { + a.Content = data + a.Size = int64(len(data)) + } + } + return &macGardenFile{asset: a, client: m.client} +} + +func (m *MacGardenFileSystem) OpenFile(path string, flag int) (afp.File, error) { + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, fs.ErrPermission + } + rel, err := m.normalize(path) + if err != nil { + return nil, err + } + + parts := strings.Split(rel, "/") + + // /search//[/]/ + if len(parts) >= 4 && parts[0] == "search" { + var itemName, fileName string + if isSearchResultType(parts[2]) { + if len(parts) < 5 { + return nil, fs.ErrInvalid + } + itemName = parts[3] + fileName = strings.Join(parts[4:], "/") + } else { + itemName = parts[2] + fileName = strings.Join(parts[3:], "/") + } + m.mu.RLock() + search, ok := m.searchByName[itemName] + m.mu.RUnlock() + if !ok { + return nil, fs.ErrNotExist + } + if err := m.ensureItemForDir(itemName, search.URL); err != nil { + return nil, fs.ErrNotExist + } + assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + for _, a := range assets { + if a.Name == fileName { + return m.openAsset(a), nil + } + } + return nil, fs.ErrNotExist + } + + // Must be asset level: Apps/Category/Item/Asset or deeper + if len(parts) < 4 || (parts[0] != "Apps" && parts[0] != "Games") { + return nil, fs.ErrInvalid + } + + catName := parts[1] + itemName := parts[2] + fileName := strings.Join(parts[3:], "/") + + catURL := m.getCategoryURL(catName) + if catURL == "" { + return nil, fs.ErrNotExist + } + + itemURL, err := m.getItemURLInCategory(catURL, itemName) + if err != nil { + return nil, fs.ErrNotExist + } + + if err := m.ensureItemForDir(itemName, itemURL); err != nil { + return nil, fs.ErrNotExist + } + 
+ assets, err := m.itemAssetsByDir(itemName) + if err != nil { + return nil, err + } + + for _, a := range assets { + if a.Name == fileName { + return m.openAsset(a), nil + } + } + return nil, fs.ErrNotExist +} + +func (m *MacGardenFileSystem) CatSearch(_ string, query string, reqMatches int32, cursor [16]byte) ([]string, [16]byte, int32) { + rawQuery := strings.TrimSpace(query) + if rawQuery == "" { + return nil, cursor, afp.ErrParamErr + } + normalizedQuery := normalizeMacGardenSearchQuery(rawQuery) + if normalizedQuery == "" { + return nil, cursor, afp.ErrParamErr + } + + limit := int(reqMatches) + if limit <= 0 { + limit = 25 + } + + isContinuation := cursor[0] == 0x01 + cursorQueryHash := uint32(cursor[1])<<16 | uint32(cursor[2])<<8 | uint32(cursor[3]) + cursorOffset := uint32(cursor[4])<<24 | uint32(cursor[5])<<16 | uint32(cursor[6])<<8 | uint32(cursor[7]) + + queryHash := uint32(0) + if len(normalizedQuery) >= 3 { + queryHash = uint32(normalizedQuery[0])<<16 | uint32(normalizedQuery[1])<<8 | uint32(normalizedQuery[2]) + } else if len(normalizedQuery) > 0 { + for i := 0; i < len(normalizedQuery); i++ { + queryHash = (queryHash << 8) | uint32(normalizedQuery[i]) + } + } + + startIdx := 0 + if isContinuation && cursorQueryHash == queryHash { + startIdx = int(cursorOffset) + } else { + netlog.Debug("[MacGarden][CatSearch] starting new search for %q", normalizedQuery) + } + + // Determine which page startIdx falls on and skip to the right entry within it. 
+ firstPage := startIdx / macGardenSearchPageSize + skipInFirst := startIdx % macGardenSearchPageSize + + type hit struct { + result garden.SearchResult + name string + } + hits := make([]hit, 0, limit) + exhausted := false + + for pageNum := firstPage; len(hits) < limit; pageNum++ { + m.ensureSearchPage(normalizedQuery, pageNum) + + m.mu.RLock() + cache := m.catSearchCache[normalizedQuery] + var page []garden.SearchResult + if cache != nil { + page = cache.pages[pageNum] + exhausted = cache.exhausted + } + m.mu.RUnlock() + + if len(page) == 0 { + break + } + + skip := 0 + if pageNum == firstPage { + skip = skipInFirst + } + for i := skip; i < len(page) && len(hits) < limit; i++ { + name := sanitizeGardenName(page[i].Name) + if name != "" { + hits = append(hits, hit{result: page[i], name: name}) + } + } + + if len(page) < macGardenSearchPageSize || exhausted { + break + } + } + + netlog.Debug("[MacGarden][CatSearch] query=%q startIdx=%d firstPage=%d skip=%d returned=%d exhausted=%v", + normalizedQuery, startIdx, firstPage, skipInFirst, len(hits), exhausted) + + paths := make([]string, 0, len(hits)) + m.mu.Lock() + for _, h := range hits { + dir := h.name + if h.result.Type != "" { + dir = filepath.Join(h.result.Type, h.name) + } + paths = append(paths, filepath.Join(m.root, "search", normalizedQuery, dir)) + m.searchByName[h.name] = macGardenCachedResult{Name: h.result.Name, URL: h.result.URL} + m.itemURLByDir[h.name] = h.result.URL + } + m.mu.Unlock() + + moreAvailable := len(hits) == limit || !exhausted + + nextCursor := [16]byte{} + nextCursor[1] = byte((queryHash >> 16) & 0xFF) + nextCursor[2] = byte((queryHash >> 8) & 0xFF) + nextCursor[3] = byte(queryHash & 0xFF) + if moreAvailable { + nextCursor[0] = 0x01 + nextOffset := uint32(startIdx + len(hits)) + nextCursor[4] = byte((nextOffset >> 24) & 0xFF) + nextCursor[5] = byte((nextOffset >> 16) & 0xFF) + nextCursor[6] = byte((nextOffset >> 8) & 0xFF) + nextCursor[7] = byte(nextOffset & 0xFF) + } + + return paths, 
nextCursor, afp.NoErr +} + +// ensureSearchPage fetches a single MacGarden search page into the cache if it +// is not already there. Marks the cache exhausted when the page is partial +// (fewer than macGardenSearchPageSize items) or returns an error. +func (m *MacGardenFileSystem) ensureSearchPage(normalizedQuery string, pageNum int) { + m.mu.RLock() + cache, ok := m.catSearchCache[normalizedQuery] + if ok { + if _, cached := cache.pages[pageNum]; cached { + m.mu.RUnlock() + return + } + if cache.exhausted { + m.mu.RUnlock() + return + } + } + m.mu.RUnlock() + + netlog.Debug("[MacGarden][CatSearch] fetching search page %d for %q", pageNum, normalizedQuery) + pageResults, err := m.client.GetSearchPage(normalizedQuery, pageNum) + + m.mu.Lock() + cache, ok = m.catSearchCache[normalizedQuery] + if !ok { + cache = &macGardenSearchCache{pages: make(map[int][]garden.SearchResult)} + } + if _, alreadyCached := cache.pages[pageNum]; !alreadyCached { + if err != nil { + netlog.Warn("[MacGarden][CatSearch] page %d fetch failed for %q: %v", pageNum, normalizedQuery, err) + cache.exhausted = true + } else { + cache.pages[pageNum] = pageResults + if len(pageResults) < macGardenSearchPageSize { + netlog.Debug("[MacGarden][CatSearch] page %d: %d results for %q (last page)", pageNum, len(pageResults), normalizedQuery) + cache.exhausted = true + } else { + netlog.Debug("[MacGarden][CatSearch] page %d: %d results for %q", pageNum, len(pageResults), normalizedQuery) + } + } + m.catSearchCache[normalizedQuery] = cache + } + m.mu.Unlock() +} + +func normalizeMacGardenSearchQuery(s string) string { + s = strings.TrimSpace(s) + if s == "" { + return "" + } + lower := strings.ToLower(s) + for _, marker := range []string{" type:app,game", " type:app", " type:game", "type:app,game", "type:app", "type:game"} { + if idx := strings.Index(lower, marker); idx >= 0 { + s = s[:idx] + lower = strings.ToLower(s) + } + } + quoted := extractQuotedSegments(s) + if len(quoted) > 0 { + best := "" + 
bestScore := -1 + for _, q := range quoted { + cand := cleanMacGardenCandidate(q) + score := 0 + for _, r := range cand { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + score++ + } + } + if score > bestScore { + bestScore = score + best = cand + } + } + if best != "" { + return best + } + } + return cleanMacGardenCandidate(s) +} + +func mirrorFolderForURL(rawURL string) string { + u, err := url.Parse(rawURL) + if err != nil { + return "mirror-unknown" + } + switch strings.ToLower(u.Host) { + case "old.mac.gdn": + return "mirror-old" + case "download.macintoshgarden.org": + return "mirror-download" + default: + return "mirror-unknown" + } +} + +func buildItemDirEntries(assets []macGardenAsset, subPath string) []fs.DirEntry { + subPath = strings.Trim(strings.ReplaceAll(subPath, "\\", "/"), "/") + dirSeen := make(map[string]struct{}) + fileSeen := make(map[string]struct{}) + entries := make([]fs.DirEntry, 0, len(assets)) + + for _, a := range assets { + name := strings.Trim(strings.ReplaceAll(a.Name, "\\", "/"), "/") + if name == "" { + continue + } + if subPath != "" { + prefix := subPath + "/" + if !strings.HasPrefix(name, prefix) { + continue + } + name = strings.TrimPrefix(name, prefix) + if name == "" { + continue + } + } + + if idx := strings.Index(name, "/"); idx >= 0 { + dirName := name[:idx] + if dirName == "" { + continue + } + if _, ok := dirSeen[dirName]; ok { + continue + } + dirSeen[dirName] = struct{}{} + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: dirName, mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + continue + } + + if _, ok := fileSeen[name]; ok { + continue + } + fileSeen[name] = struct{}{} + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: name, size: a.Size, mode: 0o444, modTime: time.Now().UTC()}}) + } + + sort.Slice(entries, func(i, j int) bool { + return strings.ToLower(entries[i].Name()) < strings.ToLower(entries[j].Name()) + }) + return entries +} + +func 
cleanMacGardenCandidate(s string) string { + s = strings.NewReplacer("$", "", "@", " ", "\"", " ").Replace(s) + s = strings.TrimSpace(s) + s = strings.Trim(s, ".,:;()[]{}<>' ") + s = strings.Join(strings.Fields(s), " ") + if s == "" || s == "." { + return "" + } + return s +} + +func extractQuotedSegments(s string) []string { + segments := make([]string, 0, 2) + start := -1 + for i, r := range s { + if r != '"' { + continue + } + if start < 0 { + start = i + 1 + continue + } + if start <= i { + segments = append(segments, s[start:i]) + } + start = -1 + } + return segments +} + +func (m *MacGardenFileSystem) ensureItemForDir(dirName string, fallbackURL string) error { + dirName = strings.TrimSpace(pathBase(dirName)) + if dirName == "" { + return fs.ErrNotExist + } + m.mu.RLock() + itemURL := m.itemURLByDir[dirName] + m.mu.RUnlock() + if itemURL == "" { + itemURL = fallbackURL + } + if itemURL == "" { + return fs.ErrNotExist + } + + m.mu.RLock() + _, ok := m.itemByURL[itemURL] + m.mu.RUnlock() + if ok { + return nil + } + + item, err := m.client.GetSoftwareItem(itemURL) + if err != nil { + return err + } + m.mu.Lock() + m.itemByURL[itemURL] = item + m.itemURLByDir[dirName] = itemURL + m.mu.Unlock() + return nil +} + +func (m *MacGardenFileSystem) itemAssetsByDir(dirName string) ([]macGardenAsset, error) { + dirName = pathBase(dirName) + m.mu.RLock() + itemURL := m.itemURLByDir[dirName] + item := m.itemByURL[itemURL] + m.mu.RUnlock() + if itemURL == "" || item == nil { + return nil, fs.ErrNotExist + } + + netlog.Info("[AFP][MacGarden] building assets for %q: %d screenshot(s), %d download group(s)", dirName, len(item.Screenshots), len(item.Downloads)) + assets := make([]macGardenAsset, 0, len(item.Downloads)+len(item.Screenshots)+2) + txtPath := filepath.Join(dirName, "Description.txt") + htmlPath := filepath.Join(dirName, "Description.html") + descMac := strings.ReplaceAll(item.Description, "\n", "\r") + txtBytes := []byte(descMac) + htmlBytes := []byte("
" + htmlEscape(item.Description) + "
") + assets = append(assets, + macGardenAsset{Name: "Description.txt", Content: txtBytes, Size: int64(len(txtBytes))}, + macGardenAsset{Name: "Description.html", Content: htmlBytes, Size: int64(len(htmlBytes))}, + ) + + m.mu.Lock() + m.descriptionByPath[txtPath] = assets[0] + m.descriptionByPath[htmlPath] = assets[1] + m.mu.Unlock() + + // For each URL use the cached size if available; collect uncached URLs for + // background probing so this function never blocks on network I/O. + var needsProbe []string + + shotIdx := 1 + for _, shotURL := range item.Screenshots { + if !strings.HasPrefix(shotURL, "http://") && !strings.HasPrefix(shotURL, "https://") { + continue + } + name := fmt.Sprintf("Screenshots/Screenshot %02d %s", shotIdx, garden.FileNameFromURL(shotURL, "image")) + size, cached := m.client.CachedContentLength(shotURL) + if !cached { + netlog.Debug("[AFP][MacGarden] screenshot %d/%d not yet cached, will probe in background", shotIdx, len(item.Screenshots)) + needsProbe = append(needsProbe, shotURL) + } else { + netlog.Debug("[AFP][MacGarden] screenshot %d size: %d bytes (cached)", shotIdx, size) + } + asset := macGardenAsset{Name: name, URL: shotURL, Size: size} + assets = append(assets, asset) + m.mu.Lock() + m.screenshotByPath[filepath.Join(dirName, name)] = asset + m.mu.Unlock() + shotIdx++ + } + + for _, dl := range item.Downloads { + for _, link := range dl.Links { + if !strings.HasPrefix(link.URL, "http://") && !strings.HasPrefix(link.URL, "https://") { + continue + } + // Skip MD5 checksum links — they are not downloadable files. 
+ if strings.Contains(link.URL, "arch_md5.php") { + continue + } + base := garden.FileNameFromURL(link.URL, dl.Title) + if base == "" { + base = sanitizeGardenName(dl.Title) + } + name := mirrorFolderForURL(link.URL) + "/" + base + size, cached := m.client.CachedContentLength(link.URL) + if !cached { + netlog.Debug("[AFP][MacGarden] download %q not yet cached, will probe in background", dl.Title) + needsProbe = append(needsProbe, link.URL) + } else { + netlog.Debug("[AFP][MacGarden] download %q size: %d bytes (cached)", dl.Title, size) + } + asset := macGardenAsset{Name: name, URL: link.URL, Size: size} + assets = append(assets, asset) + m.mu.Lock() + m.downloadByPath[filepath.Join(dirName, name)] = asset + m.mu.Unlock() + } + } + + if len(needsProbe) > 0 && m.client.FetchHead() { + netlog.Info("[AFP][MacGarden] probing %d uncached asset size(s) for %q in background", len(needsProbe), dirName) + urls := needsProbe + m.wg.Add(1) + go func() { + defer m.wg.Done() + for _, u := range urls { + select { + case <-m.stop: + return + default: + } + if _, err := m.client.HeadContentLength(u); err != nil { + netlog.Warn("[AFP][MacGarden] background probe failed for %q: %v", u, err) + } + } + netlog.Info("[AFP][MacGarden] background probe complete for %q", dirName) + }() + } + + netlog.Info("[AFP][MacGarden] built %d asset(s) for %q", len(assets), dirName) + return assets, nil +} + +func (m *MacGardenFileSystem) categoryByName(name string) (garden.Category, bool) { + for _, c := range m.categories { + if c.Name == name { + return c, true + } + } + return garden.Category{}, false +} + +func (m *MacGardenFileSystem) getCategoryURL(catName string) string { + m.loadCategories() + m.mu.RLock() + defer m.mu.RUnlock() + for _, c := range m.categories { + if c.Name == catName { + return c.URL + } + } + return "" +} + +func (m *MacGardenFileSystem) getCategoryPageMeta(catURL string) (macGardenCategoryPageMeta, error) { + m.mu.RLock() + if meta, ok := m.categoryPageMeta[catURL]; ok { + 
m.mu.RUnlock() + return meta, nil + } + m.mu.RUnlock() + + info, err := m.client.GetCategoryPageInfo(catURL) + if err != nil { + return macGardenCategoryPageMeta{}, err + } + meta := macGardenCategoryPageMeta{ + TotalCount: clampGardenCount(info.TotalCount), + PageSize: info.PageSize, + LastPageNumber: info.LastPageNumber, + LastPageCount: info.LastPageCount, + } + m.mu.Lock() + m.categoryPageMeta[catURL] = meta + m.categoryItemCount[catURL] = meta.TotalCount + m.cacheCategoryPageLocked(catURL, 0, info.FirstPage) + if info.LastPageNumber > 0 { + m.cacheCategoryPageLocked(catURL, info.LastPageNumber, info.LastPage) + } + m.mu.Unlock() + return meta, nil +} + +func (m *MacGardenFileSystem) getCategoryPage(catURL string, pageNumber int) ([]garden.SearchResult, error) { + m.mu.RLock() + if pages, ok := m.categoryPageItems[catURL]; ok { + if items, ok := pages[pageNumber]; ok { + cached := append([]garden.SearchResult(nil), items...) + m.mu.RUnlock() + return cached, nil + } + } + m.mu.RUnlock() + + items, err := m.client.GetCategoryPage(catURL, pageNumber) + if err != nil { + return nil, err + } + m.mu.Lock() + m.cacheCategoryPageLocked(catURL, pageNumber, items) + m.mu.Unlock() + return append([]garden.SearchResult(nil), items...), nil +} + +func (m *MacGardenFileSystem) cacheCategoryPageLocked(catURL string, pageNumber int, items []garden.SearchResult) { + if _, ok := m.categoryPageItems[catURL]; !ok { + m.categoryPageItems[catURL] = make(map[int][]garden.SearchResult) + } + cloned := append([]garden.SearchResult(nil), items...) 
+ m.categoryPageItems[catURL][pageNumber] = cloned + for _, item := range cloned { + name := sanitizeGardenName(item.Name) + if name == "" { + continue + } + m.itemURLByDir[name] = item.URL + } +} + +func (m *MacGardenFileSystem) readCategoryDirRange(catURL string, startIndex uint16, reqCount uint16) ([]fs.DirEntry, uint16, error) { + if reqCount > macGardenEnumerateWindow { + reqCount = macGardenEnumerateWindow + } + meta, err := m.getCategoryPageMeta(catURL) + if err != nil { + return nil, 0, err + } + total := meta.TotalCount + if total == 0 { + return nil, 0, nil + } + if startIndex < 1 { + startIndex = 1 + } + if startIndex > total { + return nil, total, nil + } + if reqCount == 0 { + return nil, total, nil + } + pageSize := meta.PageSize + if pageSize <= 0 { + return nil, total, nil + } + startOffset := int(startIndex) - 1 + endOffset := startOffset + int(reqCount) + if endOffset > int(total) { + endOffset = int(total) + } + firstPage := startOffset / pageSize + lastPage := (endOffset - 1) / pageSize + results := make([]garden.SearchResult, 0, endOffset-startOffset) + for pageNumber := firstPage; pageNumber <= lastPage; pageNumber++ { + items, err := m.getCategoryPage(catURL, pageNumber) + if err != nil { + return nil, total, err + } + pageStart := 0 + if pageNumber == firstPage { + pageStart = startOffset - pageNumber*pageSize + } + pageEnd := len(items) + if pageNumber == lastPage { + pageLimit := endOffset - pageNumber*pageSize + if pageLimit < pageEnd { + pageEnd = pageLimit + } + } + if pageStart < 0 { + pageStart = 0 + } + if pageStart > len(items) { + pageStart = len(items) + } + if pageEnd < pageStart { + pageEnd = pageStart + } + results = append(results, items[pageStart:pageEnd]...) 
+ } + entries := make([]fs.DirEntry, 0, len(results)) + for _, item := range results { + entries = append(entries, macGardenDirEntry{info: &macGardenFileInfo{name: sanitizeGardenName(item.Name), mode: fs.ModeDir | 0o555, isDir: true, modTime: time.Now().UTC()}}) + } + return entries, total, nil +} + +func (m *MacGardenFileSystem) getCategoryItems(catURL string) ([]garden.SearchResult, error) { + netlog.Debug("[AFP][MacGarden] getCategoryItems for URL: %s", catURL) + m.mu.RLock() + if items, ok := m.itemsInCategory[catURL]; ok { + m.mu.RUnlock() + netlog.Debug("[AFP][MacGarden] getCategoryItems found %d cached items for %s", len(items), catURL) + return items, nil + } + m.mu.RUnlock() + + meta, err := m.getCategoryPageMeta(catURL) + if err != nil { + netlog.Warn("[AFP][MacGarden] failed to fetch category page metadata: %v", err) + return nil, err + } + + netlog.Debug("[AFP][MacGarden] fetching all pages for category URL: %s", catURL) + items := make([]garden.SearchResult, 0, int(meta.TotalCount)) + for pageNumber := 0; pageNumber <= meta.LastPageNumber; pageNumber++ { + pageItems, err := m.getCategoryPage(catURL, pageNumber) + if err != nil { + netlog.Warn("[AFP][MacGarden] failed to fetch category page %d: %v", pageNumber, err) + return nil, err + } + items = append(items, pageItems...) 
+ } + + netlog.Info("[AFP][MacGarden] got %d items from category %s", len(items), catURL) + m.mu.Lock() + m.itemsInCategory[catURL] = items + m.categoryItemCount[catURL] = clampGardenCount(len(items)) + m.mu.Unlock() + return items, nil +} + +func clampGardenCount(count int) uint16 { + if count <= 0 { + return 0 + } + if count > 0xffff { + return 0xffff + } + return uint16(count) +} + +func (m *MacGardenFileSystem) countCategoriesWithPrefix(prefix string) uint16 { + m.mu.RLock() + defer m.mu.RUnlock() + count := uint16(0) + for _, cat := range m.categories { + if strings.HasPrefix(strings.ToLower(urlPathFromAbsolute(cat.URL)), prefix) { + count++ + } + } + return count +} + +func (m *MacGardenFileSystem) getItemURLInCategory(catURL string, itemName string) (string, error) { + // Fast path: if the item URL is already cached from prior ranged enumeration, + // avoid forcing a full category crawl. + m.mu.RLock() + if cachedURL := m.itemURLByDir[itemName]; cachedURL != "" { + m.mu.RUnlock() + return cachedURL, nil + } + if cachedItems, ok := m.itemsInCategory[catURL]; ok { + for _, item := range cachedItems { + if sanitizeGardenName(item.Name) == itemName { + m.mu.RUnlock() + return item.URL, nil + } + } + } + if cachedPages, ok := m.categoryPageItems[catURL]; ok { + for _, pageItems := range cachedPages { + for _, item := range pageItems { + if sanitizeGardenName(item.Name) == itemName { + m.mu.RUnlock() + return item.URL, nil + } + } + } + } + m.mu.RUnlock() + + meta, err := m.getCategoryPageMeta(catURL) + if err != nil { + return "", err + } + + for pageNumber := 0; pageNumber <= meta.LastPageNumber; pageNumber++ { + pageItems, err := m.getCategoryPage(catURL, pageNumber) + if err != nil { + return "", err + } + for _, item := range pageItems { + if sanitizeGardenName(item.Name) == itemName { + return item.URL, nil + } + } + } + return "", fs.ErrNotExist +} + +func isSearchResultType(s string) bool { return s == "App" || s == "Game" } + +func sanitizeGardenName(s 
string) string { + s = strings.TrimSpace(s) + replacer := strings.NewReplacer( + "\\", "_", + "/", "_", + ":", "-", + "*", "_", + "?", "", + "\"", "", + "<", "(", + ">", ")", + "|", "_", + ) + s = replacer.Replace(s) + if s == "" { + return "Item" + } + return s +} + +func htmlEscape(s string) string { + s = strings.ReplaceAll(s, "&", "&") + s = strings.ReplaceAll(s, "<", "<") + s = strings.ReplaceAll(s, ">", ">") + return s +} + +func pathBase(s string) string { + s = filepath.ToSlash(s) + parts := strings.Split(s, "/") + return parts[len(parts)-1] +} + +func pathDir(s string) string { + s = filepath.ToSlash(s) + idx := strings.LastIndex(s, "/") + if idx < 0 { + return "" + } + return s[:idx] +} + +func urlPathFromAbsolute(absURL string) string { + u, err := url.Parse(absURL) + if err != nil { + return "" + } + return u.Path +} + +var _ afp.FileSystem = (*MacGardenFileSystem)(nil) + +var errMacGardenNotFound = errors.New("macgarden: not found") diff --git a/service/afpfs/macgarden/fs_test.go b/service/afpfs/macgarden/fs_test.go new file mode 100644 index 0000000..9ea0b21 --- /dev/null +++ b/service/afpfs/macgarden/fs_test.go @@ -0,0 +1,278 @@ +//go:build (afp && macgarden) || all + +package macgarden + +import ( + "io/fs" + "path/filepath" + "testing" + + "github.com/pgodw/omnitalk/service/afp" + garden "github.com/pgodw/omnitalk/service/macgarden" +) + +func TestMacGardenChildCount_CategoryIsLazyUntilCached(t *testing.T) { + root := filepath.Clean(t.TempDir()) + fsys := &MacGardenFileSystem{ + root: root, + categories: []garden.Category{{Name: "Antivirus", URL: "https://macintoshgarden.org/apps/utilities/antivirus"}}, + categoryItemCount: make(map[string]uint16), + categoryPageMeta: make(map[string]macGardenCategoryPageMeta), + categoryPageItems: make(map[string]map[int][]garden.SearchResult), + } + + count, err := fsys.ChildCount(filepath.Join(root, "Apps", "Antivirus")) + if err != nil { + t.Fatalf("ChildCount returned error: %v", err) + } + if count != 0 { + 
t.Fatalf("uncached category count = %d, want 0", count) + } + + fsys.categoryItemCount["https://macintoshgarden.org/apps/utilities/antivirus"] = 7 + count, err = fsys.ChildCount(filepath.Join(root, "Apps", "Antivirus")) + if err != nil { + t.Fatalf("ChildCount cached returned error: %v", err) + } + if count != 7 { + t.Fatalf("cached category count = %d, want 7", count) + } +} + +func TestMacGardenReadDirRange_UsesCachedFirstAndLastPages(t *testing.T) { + root := filepath.Clean(t.TempDir()) + catURL := "https://macintoshgarden.org/apps/utilities/antivirus" + fsys := &MacGardenFileSystem{ + root: root, + categories: []garden.Category{{Name: "Antivirus", URL: catURL}}, + categoryItemCount: make(map[string]uint16), + categoryPageMeta: map[string]macGardenCategoryPageMeta{ + catURL: {TotalCount: 5, PageSize: 2, LastPageNumber: 2, LastPageCount: 1}, + }, + categoryPageItems: map[string]map[int][]garden.SearchResult{ + catURL: { + 0: { + {Name: "Anti-Virus Boot Disk", URL: "https://macintoshgarden.org/apps/anti-virus-boot-disk"}, + {Name: "ClamAV upgrade for Leopard Server", URL: "https://macintoshgarden.org/apps/clamav-upgrade-leopard-server"}, + }, + 2: { + {Name: "SecureInit", URL: "https://macintoshgarden.org/apps/secureinit"}, + }, + }, + }, + itemURLByDir: make(map[string]string), + } + fsys.cacheCategoryPageLocked(catURL, 0, fsys.categoryPageItems[catURL][0]) + fsys.cacheCategoryPageLocked(catURL, 2, fsys.categoryPageItems[catURL][2]) + + entries, total, err := fsys.ReadDirRange(filepath.Join(root, "Apps", "Antivirus"), 1, 2) + if err != nil { + t.Fatalf("ReadDirRange first page: %v", err) + } + if total != 5 { + t.Fatalf("total = %d, want 5", total) + } + if len(entries) != 2 || entries[0].Name() != "Anti-Virus Boot Disk" || entries[1].Name() != "ClamAV upgrade for Leopard Server" { + t.Fatalf("first page entries = %#v", entries) + } + + entries, total, err = fsys.ReadDirRange(filepath.Join(root, "Apps", "Antivirus"), 5, 1) + if err != nil { + 
t.Fatalf("ReadDirRange last page: %v", err) + } + if total != 5 { + t.Fatalf("last-page total = %d, want 5", total) + } + if len(entries) != 1 || entries[0].Name() != "SecureInit" { + t.Fatalf("last page entries = %#v", entries) + } + if got := fsys.itemURLByDir["SecureInit"]; got != "https://macintoshgarden.org/apps/secureinit" { + t.Fatalf("cached item URL = %q, want secureinit URL", got) + } +} + +func TestMacGardenGetItemURLInCategory_UsesCachedPageItems(t *testing.T) { + catURL := "https://macintoshgarden.org/apps/utilities/antivirus" + fsys := &MacGardenFileSystem{ + categoryPageItems: map[string]map[int][]garden.SearchResult{ + catURL: { + 0: { + {Name: "SecureInit", URL: "https://macintoshgarden.org/apps/secureinit"}, + }, + }, + }, + itemURLByDir: make(map[string]string), + } + + got, err := fsys.getItemURLInCategory(catURL, "SecureInit") + if err != nil { + t.Fatalf("getItemURLInCategory error: %v", err) + } + if got != "https://macintoshgarden.org/apps/secureinit" { + t.Fatalf("item URL = %q, want secureinit URL", got) + } +} + +func TestMacGardenReadDirRange_CategoryReqCountIsCappedToFirstWindow(t *testing.T) { + root := filepath.Clean(t.TempDir()) + catURL := "https://macintoshgarden.org/apps/utilities/antivirus" + firstPage := make([]garden.SearchResult, 0, 10) + for i := 1; i <= 10; i++ { + firstPage = append(firstPage, garden.SearchResult{ + Name: "Item " + string(rune('A'+i-1)), + URL: "https://macintoshgarden.org/apps/item-" + string(rune('a'+i-1)), + }) + } + + fsys := &MacGardenFileSystem{ + root: root, + categories: []garden.Category{{Name: "Antivirus", URL: catURL}}, + categoryItemCount: make(map[string]uint16), + categoryPageMeta: map[string]macGardenCategoryPageMeta{ + catURL: {TotalCount: 100, PageSize: 10, LastPageNumber: 9, LastPageCount: 10}, + }, + categoryPageItems: map[string]map[int][]garden.SearchResult{ + catURL: { + 0: firstPage, + }, + }, + itemURLByDir: make(map[string]string), + } + + entries, total, err := 
fsys.ReadDirRange(filepath.Join(root, "Apps", "Antivirus"), 1, 64) + if err != nil { + t.Fatalf("ReadDirRange: %v", err) + } + if total != 100 { + t.Fatalf("total = %d, want 100", total) + } + if len(entries) != 10 { + t.Fatalf("len(entries) = %d, want 10", len(entries)) + } +} +func TestMacGardenStat_ItemChildIsLazyUntilItemOpened(t *testing.T) { + root := filepath.Clean(t.TempDir()) + catURL := "https://macintoshgarden.org/apps/visual-arts-graphics/3d-rendering-cad" + itemURL := "https://macintoshgarden.org/apps/alias-upfront-20" + + fsys := &MacGardenFileSystem{ + root: root, + categories: []garden.Category{{Name: "3D Rendering & CAD", URL: catURL}}, + itemURLByDir: map[string]string{"Alias upFRONT 2.0": itemURL}, + itemByURL: make(map[string]*garden.SoftwareItem), + } + + _, err := fsys.Stat(filepath.Join(root, "Apps", "3D Rendering & CAD", "Alias upFRONT 2.0", "Configuration")) + if err == nil { + t.Fatal("expected fs.ErrNotExist for unopened item child path") + } + if err != fs.ErrNotExist { + t.Fatalf("Stat error = %v, want %v", err, fs.ErrNotExist) + } + if len(fsys.itemByURL) != 0 { + t.Fatalf("item cache size = %d, want 0 (no lazy fetch)", len(fsys.itemByURL)) + } +} + +func TestMacGardenReadDir_ItemSkipsAssetsWhenHeadFails(t *testing.T) { + root := filepath.Clean(t.TempDir()) + catURL := "https://macintoshgarden.org/apps/visual-arts-graphics/3d-rendering-cad" + itemURL := "https://macintoshgarden.org/apps/alias-upfront-20" + fsys := &MacGardenFileSystem{ + root: root, + client: garden.NewClient(), + categories: []garden.Category{{Name: "3D Rendering & CAD", URL: catURL}}, + itemURLByDir: map[string]string{"Alias upFRONT 2.0": itemURL}, + itemByURL: map[string]*garden.SoftwareItem{ + itemURL: { + Title: "Alias upFRONT 2.0", + URL: itemURL, + Description: "desc", + Screenshots: []string{"://bad-screenshot-url"}, + Downloads: []garden.DownloadDetails{{ + Title: "Alias upFRONT 2.0", + Links: []garden.DownloadLink{{Text: "Download", URL: 
"://bad-download-url"}}, + }}, + }, + }, + downloadByPath: make(map[string]macGardenAsset), + screenshotByPath: make(map[string]macGardenAsset), + descriptionByPath: make(map[string]macGardenAsset), + } + + entries, err := fsys.ReadDir(filepath.Join(root, "Apps", "3D Rendering & CAD", "Alias upFRONT 2.0")) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + if len(entries) != 2 { + t.Fatalf("len(entries) = %d, want 2 description files only", len(entries)) + } + names := map[string]bool{} + for _, e := range entries { + names[e.Name()] = true + } + if !names["Description.txt"] || !names["Description.html"] { + t.Fatalf("entries = %#v, want description files only", entries) + } +} + +func TestMacGardenStat_SearchHitRootDirExists(t *testing.T) { + root := filepath.Clean(t.TempDir()) + fsys := &MacGardenFileSystem{ + root: root, + searchByName: map[string]macGardenCachedResult{ + "ClarisWorks 4.0": {Name: "ClarisWorks 4.0", URL: "https://macintoshgarden.org/apps/clarisworks-40"}, + }, + } + + info, err := fsys.Stat(filepath.Join(root, "ClarisWorks 4.0")) + if err != nil { + t.Fatalf("Stat search-hit root dir: %v", err) + } + if !info.IsDir() { + t.Fatalf("search-hit info IsDir = false, want true") + } +} + +func TestNormalizeMacGardenSearchQuery_StripsFinderNoise(t *testing.T) { + got := normalizeMacGardenSearchQuery(`. 
" clarisworks$ @ "`) + if got != "clarisworks" { + t.Fatalf("normalizeMacGardenSearchQuery() = %q, want %q", got, "clarisworks") + } +} + +func TestMacGardenCatSearch_UsesTypeSubdirectoryWhenKnown(t *testing.T) { + root := filepath.Clean(t.TempDir()) + query := "clarisworks" + fsys := &MacGardenFileSystem{ + root: root, + catSearchCache: map[string]*macGardenSearchCache{ + query: { + pages: map[int][]garden.SearchResult{ + 0: { + {Name: "ClarisWorks 4.0", URL: "https://macintoshgarden.org/apps/clarisworks-40", Type: "App"}, + {Name: "Mystery Result", URL: "https://macintoshgarden.org/apps/mystery", Type: ""}, + }, + }, + exhausted: true, + }, + }, + searchByName: make(map[string]macGardenCachedResult), + itemURLByDir: make(map[string]string), + } + + cursor := [16]byte{0x01, 'c', 'l', 'a'} // continuation + query hash for "cla..." + paths, _, errCode := fsys.CatSearch("", query, 10, cursor) + if errCode != afp.NoErr { + t.Fatalf("CatSearch errCode=%d, want %d", errCode, afp.NoErr) + } + if len(paths) != 2 { + t.Fatalf("len(paths)=%d, want 2", len(paths)) + } + if paths[0] != filepath.Join(root, "search", query, "App", "ClarisWorks 4.0") { + t.Fatalf("paths[0]=%q, want typed path", paths[0]) + } + if paths[1] != filepath.Join(root, "search", query, "Mystery Result") { + t.Fatalf("paths[1]=%q, want legacy untyped path", paths[1]) + } +} diff --git a/service/asp/asp.go b/service/asp/asp.go index 98d6404..1c1fbc9 100644 --- a/service/asp/asp.go +++ b/service/asp/asp.go @@ -1,3 +1,5 @@ +//go:build afp || all + /* Package asp implements the AppleTalk Session Protocol (ASP) as a omnitalk service. 
The ATP transaction layer is provided by go/service/atp; this file @@ -13,15 +15,17 @@ import ( "context" "encoding/binary" "fmt" + "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" - "github.com/pgodw/omnitalk/go/service/afp" - "github.com/pgodw/omnitalk/go/service/atp" - "github.com/pgodw/omnitalk/go/service/zip" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/afp" + "github.com/pgodw/omnitalk/service/atp" + "github.com/pgodw/omnitalk/service/zip" ) // ServerSocket is the well-known AppleTalk socket for the AFP/ASP server. @@ -41,7 +45,7 @@ type Service struct { maxCmdSize int quantumSize int - router service.Router + router service.DatagramRouter registeredZones [][]byte endpoint *atp.Endpoint @@ -50,6 +54,13 @@ type Service struct { onSessionOpen func(*Session) onSessionClose func(*Session) onSessionActivity func(*Session) + + // lifeCtx is cancelled in Stop so background drain goroutines spawned + // for tickles, attentions, and write completions exit promptly instead + // of holding onto the ATP pending transaction past shutdown. + lifeCtx context.Context + lifeCancel context.CancelFunc + wg sync.WaitGroup } // Spec-to-implementation mapping notes: @@ -63,7 +74,7 @@ type Service struct { // requestContext is what the host service threads through atp.HandleInbound // so the Sender bridge can use router.Reply on the way out. type requestContext struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } @@ -99,6 +110,12 @@ func (s *Service) SetCommandHandler(handler afp.CommandHandler) { // Socket returns the socket number this service listens on. func (s *Service) Socket() uint8 { return ServerSocket } +// MaxReadSize implements afp.Transport. 
Returns ASP's negotiated quantum so +// AFP can cap per-read allocations (e.g. HTTP range requests for virtual +// filesystems) to what one ASP reply can carry. Zero before Start runs +// SPGetParms. +func (s *Service) MaxReadSize() int { return s.quantumSize } + // Start performs server-side initialization corresponding to: // - SPGetParms (server end; server ASP client -> ASP) // - SPInit (server end; server ASP client -> ASP) @@ -106,8 +123,9 @@ func (s *Service) Socket() uint8 { return ServerSocket } // In this implementation, SPInit is represented by wiring the SLS endpoint and // validating ServiceStatusBlock size against QuantumSize before accepting // traffic. -func (s *Service) Start(router service.Router) error { +func (s *Service) Start(ctx context.Context, router service.Router) error { s.router = router + s.lifeCtx, s.lifeCancel = context.WithCancel(ctx) parms := s.SPGetParms() s.maxCmdSize = int(parms.MaxCmdSize) @@ -165,12 +183,26 @@ func (s *Service) Stop() error { for _, z := range s.registeredZones { s.nbp.UnregisterName([]byte(s.serverName), []byte(nbpType), z) } + if s.lifeCancel != nil { + s.lifeCancel() + } + s.wg.Wait() s.sm.Stop() return nil } +// drainCtx returns the lifecycle context for background drain goroutines. +// Falls back to context.Background() if Start has not been called yet +// (only happens in tests that exercise individual handlers in isolation). +func (s *Service) drainCtx() context.Context { + if s.lifeCtx != nil { + return s.lifeCtx + } + return context.Background() +} + // Inbound accepts an incoming DDP datagram. ATP type only. 
-func (s *Service) Inbound(d appletalk.Datagram, p port.Port) { +func (s *Service) Inbound(d ddp.Datagram, p port.Port) { if d.DDPType != atp.DDPTypeATP { return } @@ -195,7 +227,7 @@ func (s *Service) sendBridge(src, dst atp.Address, payload []byte, hint any) err s.router.Reply(rc.d, rc.p, atp.DDPTypeATP, payload) return nil } - dg := appletalk.Datagram{ + dg := ddp.Datagram{ HopCount: 0, DestinationNetwork: dst.Net, DestinationNode: dst.Node, @@ -209,6 +241,43 @@ func (s *Service) sendBridge(src, dst atp.Address, payload []byte, hint any) err return s.router.Route(dg, true) } +// sessionedReplier is the shared prologue for SPCommand/SPWrite, both of +// which carry (CmdBlock, SessionID, SeqNum) and require: cmdblock-size cap, +// session lookup, activity touch, and ASP-level duplicate filter. Returns +// (sess, true) on success; on rejection it has already replied and returns +// (_, false). label is used for log lines so failures point to the right path. +func (s *Service) sessionedReplier(label string, sessionID uint8, seqNum uint16, cmdBlockLen int, tid uint16, reply atp.Replier) (*Session, bool) { + if cmdBlockLen > s.effectiveMaxCmdSize() { + netlog.Debug("[ASP] %s: CmdBlockSize=%d exceeds MaxCmdSize=%d (SPErrorSizeErr)", + label, cmdBlockLen, s.effectiveMaxCmdSize()) + reply(atp.ResponseMessage{ + Buffers: [][]byte{nil}, + UserBytes: []uint32{errToUserBytes(SPErrorSizeErr)}, + }) + return nil, false + } + sess := s.sm.Get(sessionID) + if sess == nil || !sess.isOpen() { + netlog.Debug("[ASP] %s: unknown or closing SessRefNum=%d", label, sessionID) + reply(atp.ResponseMessage{ + Buffers: [][]byte{nil}, + UserBytes: []uint32{errToUserBytes(SPErrorParamErr)}, + }) + return nil, false + } + sess.touchActivity() + if s.onSessionActivity != nil { + s.onSessionActivity(sess) + } + if !sess.CheckDuplicate(seqNum, tid) { + netlog.Debug("[ASP] %s: ASP-level duplicate seqNum=%d on sess=%d, dropping", + label, seqNum, sessionID) + reply(atp.ResponseMessage{Buffers: 
[][]byte{nil}}) + return nil, false + } + return sess, true +} + // handleATPRequest is the server-side dispatcher for ASP network requests. // Direction by SPFunction per spec: // - workstation -> server: OpenSess, GetStatus, Command, Write, CloseSess @@ -232,7 +301,7 @@ func (s *Service) handleATPRequest(in atp.IncomingRequest, reply atp.Replier) { // no buffers reserved is invalid, but the engine will still create // an RspCB for XO; we reply with an empty message to drain it). sessID := uint8((in.UserBytes >> 16) & 0xFF) - if sess := s.sm.Get(sessID); sess != nil { + if sess := s.sm.Get(sessID); sess != nil && sess.isOpen() { sess.touchActivity() if s.onSessionActivity != nil { s.onSessionActivity(sess) @@ -351,8 +420,9 @@ func (s *Service) handleOpenSession(in atp.IncomingRequest, reply atp.Replier) { // maps them to server-side SPCloseSession semantics. func (s *Service) handleCloseSession(in atp.IncomingRequest, reply atp.Replier) { pkt := ParseCloseSessPacket(in.UserBytes) - if s.sm.Get(pkt.SessionID) == nil { - netlog.Debug("[ASP] CloseSess: unknown SessRefNum=%d", pkt.SessionID) + sess := s.sm.Get(pkt.SessionID) + if sess == nil || !sess.isOpen() { + netlog.Debug("[ASP] CloseSess: unknown or already closing SessRefNum=%d", pkt.SessionID) reply(atp.ResponseMessage{ Buffers: [][]byte{nil}, UserBytes: []uint32{errToUserBytes(SPErrorParamErr)}, @@ -375,35 +445,7 @@ func (s *Service) handleCloseSession(in atp.IncomingRequest, reply atp.Replier) func (s *Service) handleCommand(in atp.IncomingRequest, reply atp.Replier) { receivedAt := time.Now() pkt := ParseCommandPacket(in.UserBytes, in.Data) - if len(pkt.CmdBlock) > s.effectiveMaxCmdSize() { - netlog.Debug("[ASP] Command: CmdBlockSize=%d exceeds MaxCmdSize=%d (SPErrorSizeErr)", - len(pkt.CmdBlock), s.effectiveMaxCmdSize()) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorSizeErr)}, - }) - return - } - sess := s.sm.Get(pkt.SessionID) - if sess == nil { - 
netlog.Debug("[ASP] Command: unknown SessRefNum=%d", pkt.SessionID) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorParamErr)}, - }) - return - } - sess.touchActivity() - if s.onSessionActivity != nil { - s.onSessionActivity(sess) - } - if !sess.CheckDuplicate(pkt.SeqNum, in.TID) { - netlog.Debug("[ASP] Command: ASP-level duplicate seqNum=%d on sess=%d, dropping", - pkt.SeqNum, pkt.SessionID) - // We must still respond — the ATP engine will use cached response - // from the RspCB if it sees a true ATP retransmit; for ASP-level - // duplicates we send an empty result. - reply(atp.ResponseMessage{Buffers: [][]byte{nil}}) + if _, ok := s.sessionedReplier("Command", pkt.SessionID, pkt.SeqNum, len(pkt.CmdBlock), in.TID, reply); !ok { return } @@ -412,24 +454,15 @@ func (s *Service) handleCommand(in atp.IncomingRequest, reply atp.Replier) { if s.commandHandler != nil { replyData, errCode = s.commandHandler.HandleCommand(pkt.CmdBlock) } + + // Per AFP-over-ASP spec: FPRead, FPWrite, FPEnumerate can succeed partially. + // If the reply exceeds QuantumSize, truncate it here but preserve the original + // AFP error code (e.g., ErrEOFErr or NoErr). The workstation will make + // additional requests at adjusted offsets to retrieve the rest. 
if len(replyData) > s.effectiveQuantumSize() { - netlog.Debug("[ASP] Command: SessRefNum=%d CmdReplyDataSize=%d exceeds QuantumSize=%d (SPErrorSizeErr)", - pkt.SessionID, len(replyData), s.effectiveQuantumSize()) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorSizeErr)}, - }) - return - } - if wsCap := bitmapMaxBytes(in.Bitmap); wsCap > 0 && len(replyData) > wsCap { - netlog.Debug("[ASP] Command: reply %d exceeds workstation capacity %d (SPErrorBufTooSmall)", - len(replyData), wsCap) - bufs := s.chunkResponse(replyData, in.Bitmap) - reply(atp.ResponseMessage{ - Buffers: bufs, - UserBytes: []uint32{errToUserBytes(SPErrorBufTooSmall)}, - }) - return + netlog.Debug("[ASP] Command: SessRefNum=%d CmdReplyDataSize=%d exceeds QuantumSize=%d (truncating, preserving errCode=%d)", + pkt.SessionID, len(replyData), s.effectiveQuantumSize(), errCode) + replyData = replyData[:s.effectiveQuantumSize()] } bufs := s.chunkResponse(replyData, in.Bitmap) reply(atp.ResponseMessage{ @@ -463,32 +496,8 @@ func (s *Service) handleCommand(in atp.IncomingRequest, reply atp.Replier) { func (s *Service) handleASPWrite(in atp.IncomingRequest, reply atp.Replier) { receivedAt := time.Now() pkt := ParseWritePacket(in.UserBytes, in.Data) - if len(pkt.CmdBlock) > s.effectiveMaxCmdSize() { - netlog.Debug("[ASP] Write: CmdBlockSize=%d exceeds MaxCmdSize=%d (SPErrorSizeErr)", - len(pkt.CmdBlock), s.effectiveMaxCmdSize()) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorSizeErr)}, - }) - return - } - sess := s.sm.Get(pkt.SessionID) - if sess == nil { - netlog.Debug("[ASP] Write: unknown SessRefNum=%d", pkt.SessionID) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorParamErr)}, - }) - return - } - sess.touchActivity() - if s.onSessionActivity != nil { - s.onSessionActivity(sess) - } - if !sess.CheckDuplicate(pkt.SeqNum, in.TID) { - netlog.Debug("[ASP] 
Write: duplicate seqNum=%d on sess=%d, dropping", - pkt.SeqNum, pkt.SessionID) - reply(atp.ResponseMessage{Buffers: [][]byte{nil}}) + sess, ok := s.sessionedReplier("Write", pkt.SessionID, pkt.SeqNum, len(pkt.CmdBlock), in.TID, reply) + if !ok { return } @@ -553,16 +562,25 @@ func (s *Service) handleASPWrite(in atp.IncomingRequest, reply atp.Replier) { return } - // Stash the in-flight write so CloseSess can cancel it. - sess.writeMu.Lock() - sess.write = &writeState{ + // Record the in-flight write so CloseSess can cancel it. A second + // Write before this one resolves is a protocol violation (the Mac + // serialises Write commands behind seqNum); reject it loudly rather + // than silently overwrite. + if !sess.beginWrite(&writeState{ seqNum: pkt.SeqNum, cmdBlock: pkt.CmdBlock, wantBytes: wantBytes, reply: reply, pending: pending, + }) { + netlog.Warn("[ASP] Write sess=%d: write already in flight (protocol violation), cancelling new request", pkt.SessionID) + pending.Cancel() + reply(atp.ResponseMessage{ + Buffers: [][]byte{nil}, + UserBytes: []uint32{errToUserBytes(SPErrorParamErr)}, + }) + return } - sess.writeMu.Unlock() wcSentAt := time.Now() @@ -576,12 +594,10 @@ func (s *Service) handleASPWrite(in atp.IncomingRequest, reply atp.Replier) { // returned write data, then sends the SPWrtReply-equivalent result. func (s *Service) completeWrite(sess *Session, cmdBlock []byte, wantBytes uint32, pending *atp.Pending, reply atp.Replier, bitmap uint8, receivedAt, wcSentAt time.Time) { - resp, err := pending.Wait(context.Background()) + resp, err := pending.Wait(s.drainCtx()) wcRTT := time.Since(wcSentAt) // Clear the pending state regardless of outcome. 
- sess.writeMu.Lock() - sess.write = nil - sess.writeMu.Unlock() + sess.endWrite() if err != nil { netlog.Debug("[ASP] Write sess=%d: WriteContinue failed after %v: %v", sess.ID, wcRTT.Round(time.Millisecond), err) @@ -612,24 +628,15 @@ func (s *Service) completeWrite(sess *Session, cmdBlock []byte, wantBytes uint32 if s.commandHandler != nil { replyData, errCode = s.commandHandler.HandleCommand(full) } + + // Per AFP-over-ASP spec: FPRead, FPWrite, FPEnumerate can succeed partially. + // If the reply exceeds QuantumSize, truncate it here but preserve the original + // AFP error code. The workstation will make additional requests at adjusted + // offsets to retrieve the rest. if len(replyData) > s.effectiveQuantumSize() { - netlog.Debug("[ASP] Write: SessRefNum=%d WrtReplyDataSize=%d exceeds QuantumSize=%d (SPErrorSizeErr)", - sess.ID, len(replyData), s.effectiveQuantumSize()) - reply(atp.ResponseMessage{ - Buffers: [][]byte{nil}, - UserBytes: []uint32{errToUserBytes(SPErrorSizeErr)}, - }) - return - } - if wsCap := bitmapMaxBytes(bitmap); wsCap > 0 && len(replyData) > wsCap { - netlog.Debug("[ASP] Write: reply %d exceeds workstation capacity %d (SPErrorBufTooSmall)", - len(replyData), wsCap) - bufs := s.chunkResponse(replyData, bitmap) - reply(atp.ResponseMessage{ - Buffers: bufs, - UserBytes: []uint32{errToUserBytes(SPErrorBufTooSmall)}, - }) - return + netlog.Debug("[ASP] Write: SessRefNum=%d WrtReplyDataSize=%d exceeds QuantumSize=%d (truncating, preserving errCode=%d)", + sess.ID, len(replyData), s.effectiveQuantumSize(), errCode) + replyData = replyData[:s.effectiveQuantumSize()] } bufs := s.chunkResponse(replyData, bitmap) reply(atp.ResponseMessage{ @@ -667,7 +674,11 @@ func (s *Service) sendTickle(sess *Session) { } // Drain in the background — we don't actually need the response, but // we must release the TCB. 
- go func() { _, _ = pending.Wait(context.Background()) }() + s.wg.Add(1) + go func() { + defer s.wg.Done() + _, _ = pending.Wait(s.drainCtx()) + }() } // errToUserBytes converts a (possibly negative) ASP error constant into the @@ -721,7 +732,11 @@ func (s *Service) SendAttention(sessID uint8, code uint16) error { if err != nil { return err } - go func() { _, _ = pending.Wait(context.Background()) }() + s.wg.Add(1) + go func() { + defer s.wg.Done() + _, _ = pending.Wait(s.drainCtx()) + }() netlog.Debug("[ASP] SendAttention: sess=%d code=0x%04X", sessID, code) return nil } diff --git a/service/asp/asp_test.go b/service/asp/asp_test.go index e196ec7..3c9ddc9 100644 --- a/service/asp/asp_test.go +++ b/service/asp/asp_test.go @@ -1,10 +1,12 @@ +//go:build afp || all + package asp import ( "encoding/binary" "testing" - "github.com/pgodw/omnitalk/go/service/atp" + "github.com/pgodw/omnitalk/service/atp" ) type stubCommandHandler struct { @@ -54,7 +56,10 @@ func TestHandleCloseSessionUnknownSessionReturnsParamErr(t *testing.T) { } } -func TestHandleCommandReplyOverQuantumReturnsSizeErr(t *testing.T) { +func TestHandleCommandReplyOverQuantumGetsTruncated(t *testing.T) { + // Per AFP spec: FPRead, FPWrite, FPEnumerate can return partially. + // When reply exceeds QuantumSize, ASP should truncate and preserve the + // original AFP error code, allowing workstation to make additional requests. 
h := stubCommandHandler{reply: make([]byte, 12), err: SPErrorNoError} s := New("test", h, nil, nil) s.quantumSize = 8 @@ -70,8 +75,17 @@ func TestHandleCommandReplyOverQuantumReturnsSizeErr(t *testing.T) { var got atp.ResponseMessage s.handleCommand(in, func(m atp.ResponseMessage) { got = m }) - if len(got.UserBytes) != 1 || got.UserBytes[0] != errToUserBytes(SPErrorSizeErr) { - t.Fatalf("expected SizeErr user bytes, got %#v", got.UserBytes) + // Should preserve the NoError code and truncate to quantum size + if len(got.UserBytes) != 1 || got.UserBytes[0] != errToUserBytes(SPErrorNoError) { + t.Fatalf("expected NoError user bytes, got %#v", got.UserBytes) + } + // Check that data was truncated to quantum size + totalReplyLen := 0 + for _, buf := range got.Buffers { + totalReplyLen += len(buf) + } + if totalReplyLen > 8 { + t.Fatalf("reply %d bytes exceeds quantum size 8", totalReplyLen) } } @@ -109,7 +123,7 @@ func TestHandleCommandCmdBlockOverMaxReturnsSizeErr(t *testing.T) { } } -func TestHandleCommandReplyOverWorkstationCapacityReturnsBufTooSmall(t *testing.T) { +func TestHandleCommandReplyOverWorkstationCapacityGetsTruncated(t *testing.T) { h := stubCommandHandler{reply: make([]byte, ATPMaxData+10), err: SPErrorNoError} s := New("test", h, nil, nil) s.maxCmdSize = ATPMaxData @@ -126,8 +140,15 @@ func TestHandleCommandReplyOverWorkstationCapacityReturnsBufTooSmall(t *testing. 
var got atp.ResponseMessage s.handleCommand(in, func(m atp.ResponseMessage) { got = m }) - if len(got.UserBytes) != 1 || got.UserBytes[0] != errToUserBytes(SPErrorBufTooSmall) { - t.Fatalf("expected BufTooSmall user bytes, got %#v", got.UserBytes) + if len(got.UserBytes) != 1 || got.UserBytes[0] != errToUserBytes(SPErrorNoError) { + t.Fatalf("expected NoError user bytes, got %#v", got.UserBytes) + } + totalReplyLen := 0 + for _, buf := range got.Buffers { + totalReplyLen += len(buf) + } + if totalReplyLen > ATPMaxData { + t.Fatalf("reply %d bytes exceeds bitmap capacity %d", totalReplyLen, ATPMaxData) } } diff --git a/service/asp/seqfilter_test.go b/service/asp/seqfilter_test.go new file mode 100644 index 0000000..ec2533e --- /dev/null +++ b/service/asp/seqfilter_test.go @@ -0,0 +1,28 @@ +//go:build afp || all + +package asp + +import "testing" + +func TestSeqFilter(t *testing.T) { + t.Parallel() + tests := []struct { + name string + seq uint16 + tid uint16 + want bool + }{ + {"first message accepted", 0, 100, true}, + {"new seq accepted", 1, 101, true}, + {"same seq same tid is ATP retransmit, accepted", 1, 101, true}, + {"same seq new tid is ASP duplicate, dropped", 1, 102, false}, + {"after duplicate, advancing seq accepted", 2, 103, true}, + {"seqNum wraparound back to 0 accepted", 0, 104, true}, + } + var f seqFilter + for _, tc := range tests { + if got := f.accept(tc.seq, tc.tid); got != tc.want { + t.Errorf("%s: accept(%d, %d) = %v, want %v", tc.name, tc.seq, tc.tid, got, tc.want) + } + } +} diff --git a/service/asp/session.go b/service/asp/session.go index 0f00177..c1265ea 100644 --- a/service/asp/session.go +++ b/service/asp/session.go @@ -1,3 +1,5 @@ +//go:build afp || all + // Package asp — SessionManager. 
// // SessionManager owns the lifecycle of every open ASP session: tickle @@ -8,18 +10,53 @@ package asp import ( + "maps" + "slices" "sync" "sync/atomic" "time" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/service/atp" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/service/atp" +) + +// sessionState names the lifecycle of an ASP session. Legal transitions: +// +// stateOpen -> stateClosing (Close called) +// stateClosing -> stateClosed (teardown complete) +// +// Inbound handlers atomically check stateOpen at entry and bail if the +// session is on its way down — guarding against the race where an inbound +// frame and CloseSess interleave. +type sessionState uint32 + +const ( + stateOpen sessionState = iota + stateClosing + stateClosed ) +func (s sessionState) String() string { + switch s { + case stateOpen: + return "Open" + case stateClosing: + return "Closing" + case stateClosed: + return "Closed" + default: + return "?" + } +} + // Session is the per-session state owned by SessionManager. type Session struct { ID uint8 + // state is read by every inbound handler and written by Close. atomic + // because it is accessed without holding mu. + state atomic.Uint32 // sessionState + // Workstation address (where Tickle/WriteContinue/Attention go). WSNet uint16 WSNode uint8 @@ -31,19 +68,20 @@ type Session struct { SrvNet uint16 SrvNode uint8 - // Sequence number duplicate filtering (spec §"Sequencing and duplicate - // filtration"). Same seqNum + different ATP TID = true ASP duplicate - // (drop). Same seqNum + same TID = ATP retransmission — but ATP XO - // already filters those before they reach us, so we can drop them. 
- seqMu sync.Mutex - lastReqNum uint16 - lastTID uint16 - seqInited bool + // mu serialises everything mutable that can be touched from both the + // engine inbound goroutine and Close (running on the maintenance + // goroutine or the inbound goroutine that handled CloseSess): the + // sequence-number filter and the two-phase write state. Hold time is + // microseconds; one lock is simpler to reason about than two. + mu sync.Mutex + + // seq filters ASP-level duplicates per spec §"Sequencing and duplicate + // filtration". Held under mu. + seq seqFilter // Two-phase Write state (one in flight per session is sufficient — the // Mac client serializes Write commands behind their seqNum). - writeMu sync.Mutex - write *writeState + write *writeState lastActivity atomic.Int64 // Unix nanoseconds @@ -52,8 +90,73 @@ type Session struct { func (s *Session) touchActivity() { s.lastActivity.Store(time.Now().UnixNano()) } +// isOpen reports whether the session is still accepting inbound traffic. +// Once Close transitions it out of stateOpen, every handler should bail. +func (s *Session) isOpen() bool { return sessionState(s.state.Load()) == stateOpen } + +// markClosing atomically transitions stateOpen->stateClosing. Returns true +// if this caller won the transition and is responsible for teardown. +func (s *Session) markClosing() bool { + return s.state.CompareAndSwap(uint32(stateOpen), uint32(stateClosing)) +} + +// markClosed marks teardown complete. Idempotent. +func (s *Session) markClosed() { s.state.Store(uint32(stateClosed)) } + +// beginWrite transitions the session's write state from Idle to AwaitingData +// and records the in-flight write. Returns false (and changes nothing) if a +// write is already in flight — protocol-wise this should not happen because +// the Mac client serialises Write commands behind seqNum, but we surface the +// invariant violation rather than silently overwrite. 
+func (s *Session) beginWrite(ws *writeState) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.write != nil && s.write.phase != writeIdle { + return false + } + ws.phase = writeAwaitingData + s.write = ws + return true +} + +// endWrite transitions back to Idle and clears the in-flight write, +// returning the previous state (if any) so callers can act on its pending. +// Safe to call on an already-Idle session — returns nil. +func (s *Session) endWrite() *writeState { + s.mu.Lock() + defer s.mu.Unlock() + prev := s.write + s.write = nil + return prev +} + +// writePhase names the states of the SPWrite two-phase exchange so each +// transition is checked against a known-legal edge instead of inferred +// from field nil-ness. Legal edges: +// +// writeIdle -> writeAwaitingData (handleASPWrite sent WriteContinue TReq) +// writeAwaitingData -> writeIdle (completeWrite resolved or cancelled) +type writePhase uint8 + +const ( + writeIdle writePhase = iota + writeAwaitingData +) + +func (p writePhase) String() string { + switch p { + case writeIdle: + return "Idle" + case writeAwaitingData: + return "AwaitingData" + default: + return "?" + } +} + // writeState holds in-flight state for the two-phase aspWrite protocol. type writeState struct { + phase writePhase seqNum uint16 cmdBlock []byte wantBytes uint32 @@ -145,14 +248,12 @@ func (m *SessionManager) Get(id uint8) *Session { func (m *SessionManager) SessionIDs() []uint8 { m.mu.RLock() defer m.mu.RUnlock() - ids := make([]uint8, 0, len(m.sessions)) - for id := range m.sessions { - ids = append(ids, id) - } - return ids + return slices.Collect(maps.Keys(m.sessions)) } -// Close terminates a session. +// Close terminates a session. The CAS on session state means concurrent +// callers (e.g. CloseSess inbound + maintenance timeout) observe a single +// teardown; only the winner runs the cancellation and onClose callback. 
func (m *SessionManager) Close(id uint8) { m.mu.Lock() sess, ok := m.sessions[id] @@ -161,37 +262,58 @@ func (m *SessionManager) Close(id uint8) { delete(m.sessions, id) } m.mu.Unlock() - if ok { - snap := *sess - close(sess.stop) - // Cancel any in-flight WriteContinue. - sess.writeMu.Lock() - if sess.write != nil && sess.write.pending != nil { - sess.write.pending.Cancel() - } - sess.write = nil - sess.writeMu.Unlock() - if onClose != nil { - onClose(&snap) - } + if !ok { + return + } + if !sess.markClosing() { + // Another goroutine already started teardown. + return + } + close(sess.stop) + if prev := sess.endWrite(); prev != nil && prev.pending != nil { + prev.pending.Cancel() + } + sess.markClosed() + if onClose != nil { + onClose(sess) } } -// CheckDuplicate implements ASP sequence-number duplicate filtration. -// Returns true if the request should be processed; false if it is a duplicate -// and should be silently dropped. -func (s *Session) CheckDuplicate(seqNum, tid uint16) bool { - s.seqMu.Lock() - defer s.seqMu.Unlock() - if s.seqInited && seqNum == s.lastReqNum && tid != s.lastTID { +// seqFilter implements ASP sequence-number duplicate filtration per spec +// §"Sequencing and duplicate filtration". A request whose seqNum repeats +// the last accepted seqNum but carries a different ATP TID is a true +// ASP-level duplicate and is dropped. (Same seqNum + same TID is an ATP +// retransmission, but ATP XO already filters those before they reach us.) +// +// Stored under Session.mu; the type itself is intentionally lock-free +// so it can be unit-tested in isolation. +type seqFilter struct { + lastSeq uint16 + lastTID uint16 + inited bool +} + +// accept records (seq, tid) and reports whether the request should be +// processed. False means duplicate — drop. 
+func (f *seqFilter) accept(seq, tid uint16) bool { + if f.inited && seq == f.lastSeq && tid != f.lastTID { return false } - s.lastReqNum = seqNum - s.lastTID = tid - s.seqInited = true + f.lastSeq = seq + f.lastTID = tid + f.inited = true return true } +// CheckDuplicate is the locked Session-level entrypoint for seqFilter.accept. +// Returns true if the request should be processed; false if it is a duplicate +// and should be silently dropped. +func (s *Session) CheckDuplicate(seqNum, tid uint16) bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.seq.accept(seqNum, tid) +} + // maintenance runs the per-session tickle + inactivity-timeout loop. func (m *SessionManager) maintenance(sess *Session) { ticker := time.NewTicker(m.tickleInterval) diff --git a/service/asp/types.go b/service/asp/types.go index 05cd0a0..a03b255 100644 --- a/service/asp/types.go +++ b/service/asp/types.go @@ -1,296 +1,75 @@ +//go:build afp || all + package asp import ( - "encoding/binary" - "time" + pasp "github.com/pgodw/omnitalk/protocol/asp" ) -// --------------------------------------------------------------------------- -// SPFunction codes — first byte (MSB) of ATP UserData in every ASP packet. -// Inside AppleTalk, 2nd Edition, Chapter 11, §"SPFunction values". -// --------------------------------------------------------------------------- - +// SPFunction codes. 
const ( - SPFuncCloseSess = 1 // workstation → server - SPFuncCommand = 2 // workstation → server - SPFuncGetStatus = 3 // workstation → server - SPFuncOpenSess = 4 // workstation → server - SPFuncTickle = 5 // both directions - SPFuncWrite = 6 // workstation → server (phase 1 of two-phase write) - SPFuncWriteContinue = 7 // server → workstation (phase 2: server requests write data) - SPFuncAttention = 8 // server → workstation + SPFuncCloseSess = pasp.SPFuncCloseSess + SPFuncCommand = pasp.SPFuncCommand + SPFuncGetStatus = pasp.SPFuncGetStatus + SPFuncOpenSess = pasp.SPFuncOpenSess + SPFuncTickle = pasp.SPFuncTickle + SPFuncWrite = pasp.SPFuncWrite + SPFuncWriteContinue = pasp.SPFuncWriteContinue + SPFuncAttention = pasp.SPFuncAttention ) -// --------------------------------------------------------------------------- -// ASP protocol version number — §"Opening a session". -// The OpenSess packet carries this in the 2-byte version field. -// --------------------------------------------------------------------------- - -const ASPVersion uint16 = 0x0100 - -// --------------------------------------------------------------------------- -// Timer values — §"Timeouts and retry counts" / §"Maintaining the session". -// --------------------------------------------------------------------------- - +// Version + timers. const ( - // TickleInterval is the period between keep-alive tickle packets (spec: 30 s). - TickleInterval = 30 * time.Second - - // SessionMaintenanceTimeout is the inactivity duration after which a session - // is assumed dead (spec: 2 minutes). - SessionMaintenanceTimeout = 2 * time.Minute + ASPVersion = pasp.Version + TickleInterval = pasp.TickleInterval + SessionMaintenanceTimeout = pasp.SessionMaintenanceTimeout ) -// --------------------------------------------------------------------------- -// ASP Error Codes — Inside Macintosh: Networking, Chapter 8. -// Decimal / hex values per the spec table. 
-// --------------------------------------------------------------------------- - +// Error codes. const ( - SPErrorNoError = 0 // $00 — no error (both ends) - SPErrorBadVersNum = -1066 // $FBD6 — workstation end only - SPErrorBufTooSmall = -1067 // $FBD5 — workstation end only - SPErrorNoMoreSessions = -1068 // $FBD4 — both ends - SPErrorNoServers = -1069 // $FBD3 — workstation end only - SPErrorParamErr = -1070 // $FBD2 — both ends - SPErrorServerBusy = -1071 // $FBD1 — workstation end only - SPErrorSessClosed = -1072 // $FBD0 — both ends - SPErrorSizeErr = -1073 // $FBCF — both ends - SPErrorTooManyClients = -1074 // $FBCE — server end only - SPErrorNoAck = -1075 // $FBCD — server end only + SPErrorNoError = pasp.SPErrorNoError + SPErrorBadVersNum = pasp.SPErrorBadVersNum + SPErrorBufTooSmall = pasp.SPErrorBufTooSmall + SPErrorNoMoreSessions = pasp.SPErrorNoMoreSessions + SPErrorNoServers = pasp.SPErrorNoServers + SPErrorParamErr = pasp.SPErrorParamErr + SPErrorServerBusy = pasp.SPErrorServerBusy + SPErrorSessClosed = pasp.SPErrorSessClosed + SPErrorSizeErr = pasp.SPErrorSizeErr + SPErrorTooManyClients = pasp.SPErrorTooManyClients + SPErrorNoAck = pasp.SPErrorNoAck ) -// AFP attention codes sent via SPFuncAttention. -// The attention word is a 16-bit value placed in the 2-byte ATP data payload. -// See Inside Macintosh: Files, Chapter 3 (AFP). -const ( - // AspAttnServerGoingDown signals that the AFP server is shutting down. - // Bit 15 is the "server is going down" flag defined by the AFP spec. - AspAttnServerGoingDown uint16 = 0x8000 -) +// AFP attention codes. +const AspAttnServerGoingDown = pasp.AspAttnServerGoingDown -// --------------------------------------------------------------------------- // ATP-derived size constants. -// --------------------------------------------------------------------------- - const ( - // ATPMaxData is the maximum data payload per ATP response packet. - // DDP max data = 586 bytes; ATP header = 8 bytes → 578 bytes. 
- ATPMaxData = 578 - - // ATPMaxPackets is the maximum number of response packets in a single - // ATP transaction (bitmap has 8 bits). - ATPMaxPackets = 8 - - // QuantumSize is the maximum size reply block (or SPWrtContinue write data) - // on a standard AppleTalk network: 8 × 578 = 4624 bytes. - // On LocalTalk the client reports a smaller bitmap (typically 1 packet = 578). - QuantumSize = ATPMaxData * ATPMaxPackets + ATPMaxData = pasp.ATPMaxData + ATPMaxPackets = pasp.ATPMaxPackets + QuantumSize = pasp.QuantumSize ) -// --------------------------------------------------------------------------- -// SPGetParms — local API call (no network packet). -// -// Before any sessions are opened, both the workstation ASP client and the -// server ASP client should interrogate ASP to identify the maximum sizes of -// commands and replies allowed by the underlying transport mechanism. -// On a standard AppleTalk network (ASP over ATP): MaxCmdSize = 578 bytes, -// QuantumSize = 4624 bytes. For transports other than ATP these may differ. -// -// See Service.SPGetParms in asp.go for the runtime implementation. -// --------------------------------------------------------------------------- - -// GetParmsResult holds the values returned by an SPGetParms call. -type GetParmsResult struct { - MaxCmdSize uint16 // maximum size of a command block (bytes) - QuantumSize uint16 // maximum size of a reply block or SPWrtContinue write data (bytes) -} - -// =================================================================== -// Packet types — one struct per SPFunction. -// -// UserData byte layout (MSB first, 4 bytes in ATP header): -// [0] SPFunction -// [1] SessionID (or WSSSocket for OpenSess request) -// [2:3] SeqNum / VersionNum / AttentionCode / 0 -// -// Each type that the server parses from incoming packets has a -// ParseXxx(userData uint32, payload []byte) factory. 
-// Each type that the server marshals for outgoing packets has a -// MarshalUserData() uint32 method; types with an ATP data payload -// also have a MarshalData() []byte method. -// =================================================================== - -// --------------------------------------------------------------------------- -// OpenSess — workstation → server (TReq to SLS) -// --------------------------------------------------------------------------- - -// OpenSessPacket represents an incoming ASP OpenSess request. -type OpenSessPacket struct { - WSSSocket uint8 // workstation session socket - VersionNum uint16 // ASP version number (expected: ASPVersion = 0x0100) -} - -// ParseOpenSessPacket extracts fields from the ATP UserData of an OpenSess TReq. -func ParseOpenSessPacket(userData uint32) OpenSessPacket { - return OpenSessPacket{ - WSSSocket: uint8((userData >> 16) & 0xFF), - VersionNum: uint16(userData & 0xFFFF), - } -} - -// --------------------------------------------------------------------------- -// OpenSessReply — server → workstation (TResp to OpenSess TReq) -// --------------------------------------------------------------------------- - -// OpenSessReplyPacket represents an outgoing ASP OpenSess reply. -type OpenSessReplyPacket struct { - SSSSocket uint8 // server session socket - SessionID uint8 - ErrorCode int16 // 0 = success; SPErrorBadVersNum, SPErrorServerBusy, SPErrorTooManyClients -} - -// MarshalUserData encodes the reply into the 4-byte ATP UserData field. 
-// -// [0] SSSSocket [1] SessionID [2:3] ErrorCode (big-endian) -func (p OpenSessReplyPacket) MarshalUserData() uint32 { - return (uint32(p.SSSSocket) << 24) | - (uint32(p.SessionID) << 16) | - uint32(uint16(p.ErrorCode)) -} - -// --------------------------------------------------------------------------- -// CloseSess — workstation → server (TReq to SSS) -// --------------------------------------------------------------------------- - -// CloseSessPacket represents an incoming ASP CloseSess request. -type CloseSessPacket struct { - SessionID uint8 -} - -// ParseCloseSessPacket extracts fields from the ATP UserData of a CloseSess TReq. -func ParseCloseSessPacket(userData uint32) CloseSessPacket { - return CloseSessPacket{ - SessionID: uint8((userData >> 16) & 0xFF), - } -} - -// --------------------------------------------------------------------------- -// CloseSessReply — server → workstation (TResp to CloseSess TReq) -// --------------------------------------------------------------------------- - -// CloseSessReplyUserData returns the ATP UserData for a CloseSess reply (all zeros). -func CloseSessReplyUserData() uint32 { return 0 } - -// --------------------------------------------------------------------------- -// GetStatus — workstation → server (TReq to SLS) -// --------------------------------------------------------------------------- - -// GetStatusPacket represents an incoming ASP GetStatus request. -// No fields beyond SPFunction; the rest of UserData is zero per spec. -type GetStatusPacket struct{} - -// ParseGetStatusPacket is provided for completeness; UserData is unused. -func ParseGetStatusPacket(_ uint32) GetStatusPacket { return GetStatusPacket{} } - -// --------------------------------------------------------------------------- -// Command — workstation → server (TReq to SSS) -// --------------------------------------------------------------------------- - -// CommandPacket represents an incoming ASP Command request. 
-type CommandPacket struct { - SessionID uint8 - SeqNum uint16 - CmdBlock []byte // AFP command block (ATP data payload) -} - -// ParseCommandPacket extracts fields from the ATP UserData and payload. -func ParseCommandPacket(userData uint32, payload []byte) CommandPacket { - return CommandPacket{ - SessionID: uint8((userData >> 16) & 0xFF), - SeqNum: uint16(userData & 0xFFFF), - CmdBlock: payload, - } -} - -// --------------------------------------------------------------------------- -// Write — workstation → server (TReq to SSS, phase 1) -// --------------------------------------------------------------------------- - -// WritePacket represents an incoming ASP Write request (same layout as Command). -type WritePacket struct { - SessionID uint8 - SeqNum uint16 - CmdBlock []byte // AFP command block (e.g. FPWrite header) -} - -// ParseWritePacket extracts fields from the ATP UserData and payload. -func ParseWritePacket(userData uint32, payload []byte) WritePacket { - return WritePacket{ - SessionID: uint8((userData >> 16) & 0xFF), - SeqNum: uint16(userData & 0xFFFF), - CmdBlock: payload, - } -} - -// --------------------------------------------------------------------------- -// WriteContinue — server → workstation WSS (TReq, phase 2) -// --------------------------------------------------------------------------- - -// WriteContinuePacket represents an outgoing ASP WriteContinue request. -type WriteContinuePacket struct { - SessionID uint8 - SeqNum uint16 // same sequence number as the original Write - BufferSize uint16 // available buffer size (bytes the server wants) -} - -// MarshalUserData encodes the WriteContinue into the 4-byte ATP UserData. -// -// [0] SPFuncWriteContinue [1] SessionID [2:3] SeqNum -func (p WriteContinuePacket) MarshalUserData() uint32 { - return (uint32(SPFuncWriteContinue) << 24) | - (uint32(p.SessionID) << 16) | - uint32(p.SeqNum) -} - -// MarshalData returns the 2-byte ATP data payload (buffer size, big-endian). 
-func (p WriteContinuePacket) MarshalData() []byte { - b := make([]byte, 2) - binary.BigEndian.PutUint16(b, p.BufferSize) - return b -} - -// --------------------------------------------------------------------------- -// Tickle — both directions (TReq, ALO, retry=infinite) -// --------------------------------------------------------------------------- - -// TicklePacket represents an outgoing ASP Tickle. -type TicklePacket struct { - SessionID uint8 -} - -// MarshalUserData encodes the Tickle into the 4-byte ATP UserData. -// -// [0] SPFuncTickle [1] SessionID [2:3] 0 -func (p TicklePacket) MarshalUserData() uint32 { - return (uint32(SPFuncTickle) << 24) | (uint32(p.SessionID) << 16) -} - -// --------------------------------------------------------------------------- -// Attention — server → workstation WSS (TReq, ALO) -// --------------------------------------------------------------------------- - -// AttentionPacket represents an outgoing ASP Attention. -type AttentionPacket struct { - SessionID uint8 - AttentionCode uint16 // must be non-zero per spec -} +// Wire types. +type ( + GetParmsResult = pasp.GetParmsResult + OpenSessPacket = pasp.OpenSessPacket + OpenSessReplyPacket = pasp.OpenSessReplyPacket + CloseSessPacket = pasp.CloseSessPacket + GetStatusPacket = pasp.GetStatusPacket + CommandPacket = pasp.CommandPacket + WritePacket = pasp.WritePacket + WriteContinuePacket = pasp.WriteContinuePacket + TicklePacket = pasp.TicklePacket + AttentionPacket = pasp.AttentionPacket +) -// MarshalUserData encodes the Attention into the 4-byte ATP UserData. -// -// [0] SPFuncAttention [1] SessionID [2:3] AttentionCode -func (p AttentionPacket) MarshalUserData() uint32 { - return (uint32(SPFuncAttention) << 24) | - (uint32(p.SessionID) << 16) | - uint32(p.AttentionCode) -} +// Parse helpers. 
+var ( + ParseOpenSessPacket = pasp.ParseOpenSessPacket + ParseCloseSessPacket = pasp.ParseCloseSessPacket + ParseGetStatusPacket = pasp.ParseGetStatusPacket + ParseCommandPacket = pasp.ParseCommandPacket + ParseWritePacket = pasp.ParseWritePacket + CloseSessReplyUserData = pasp.CloseSessReplyUserData +) diff --git a/service/atp/transaction.go b/service/atp/transaction.go index a4f8a8a..5324382 100644 --- a/service/atp/transaction.go +++ b/service/atp/transaction.go @@ -17,7 +17,8 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" + patp "github.com/pgodw/omnitalk/protocol/atp" ) // ----- Address / Sender / Clock ------------------------------------------- @@ -84,7 +85,7 @@ type Request struct { Data []byte NumBuffers int // number of TResp packets the caller has reserved (1..8) XO bool - TRelTO TRelTimeout + TRelTO patp.TRelTimeout RetryTimeout time.Duration MaxRetries int // -1 = infinite } @@ -92,7 +93,7 @@ type Request struct { // Response is the assembled result of a successful transaction. type Response struct { Buffers [][]byte // index = sequence number; nil if not received (only possible after EOM) - UserBytes [MaxResponsePackets]uint32 + UserBytes [patp.MaxResponsePackets]uint32 Count int // number of packets actually delivered } @@ -105,7 +106,7 @@ type IncomingRequest struct { Data []byte Bitmap uint8 XO bool - TRelTO TRelTimeout + TRelTO patp.TRelTimeout } // ResponseMessage is what the responder handler returns. @@ -223,7 +224,7 @@ type tcb struct { dst Address tid uint16 xo bool - trelTO TRelTimeout + trelTO patp.TRelTimeout bitmap uint8 // bits still outstanding expected int // number of buffers requested resp Response @@ -266,10 +267,10 @@ func (p *Pending) Cancel() { // SendRequest issues a new transaction and returns a Pending handle. 
func (e *Endpoint) SendRequest(req Request) (*Pending, error) { - if req.NumBuffers < 1 || req.NumBuffers > MaxResponsePackets { + if req.NumBuffers < 1 || req.NumBuffers > patp.MaxResponsePackets { return nil, ErrInvalidNumBuffers } - if len(req.Data) > MaxATPData { + if len(req.Data) > patp.MaxATPData { return nil, ErrDataTooLarge } if req.RetryTimeout <= 0 { @@ -308,15 +309,15 @@ func (e *Endpoint) SendRequest(req Request) (*Pending, error) { } func (e *Endpoint) buildTReq(t *tcb, userBytes uint32, data []byte) []byte { - ctrl := uint8(TREQ) + ctrl := uint8(patp.TREQ) if t.xo { - ctrl |= XO + ctrl |= patp.XO ctrl |= uint8(t.trelTO) & 0x07 } - h := ATPHeader{Control: ctrl, Bitmap: t.bitmap, TransID: t.tid, UserData: userBytes} - out := make([]byte, ATPHeaderSize+len(data)) + h := patp.Header{Control: ctrl, Bitmap: t.bitmap, TransID: t.tid, UserData: userBytes} + out := make([]byte, patp.HeaderSize+len(data)) copy(out, h.Marshal()) - copy(out[ATPHeaderSize:], data) + copy(out[patp.HeaderSize:], data) return out } @@ -434,25 +435,25 @@ type ResponsePacket struct { // it to retain a pointer to the original datagram + rxPort so the Sender // implementation can call e.g. router.Reply. 
func (e *Endpoint) HandleInbound(packet []byte, src, local Address, hint any) { - var h ATPHeader + var h patp.Header if err := h.Unmarshal(packet); err != nil { return } var data []byte - if len(packet) > ATPHeaderSize { - data = packet[ATPHeaderSize:] + if len(packet) > patp.HeaderSize { + data = packet[patp.HeaderSize:] } switch h.FuncCode() { - case FuncTReq: + case patp.FuncTReq: e.handleTReq(h, data, src, local, hint) - case FuncTResp: + case patp.FuncTResp: e.handleTResp(h, data, src) - case FuncTRel: + case patp.FuncTRel: e.handleTRel(h, src) } } -func (e *Endpoint) handleTResp(h ATPHeader, data []byte, src Address) { +func (e *Endpoint) handleTResp(h patp.Header, data []byte, src Address) { e.mu.Lock() t, ok := e.tcbs[h.TransID] if !ok || t.dst != src { @@ -461,7 +462,7 @@ func (e *Endpoint) handleTResp(h ATPHeader, data []byte, src Address) { return } seq := h.Bitmap // sequence number for TResp - if int(seq) >= MaxResponsePackets || int(seq) >= t.expected { + if int(seq) >= patp.MaxResponsePackets || int(seq) >= t.expected { e.mu.Unlock() return } @@ -476,7 +477,7 @@ func (e *Endpoint) handleTResp(h ATPHeader, data []byte, src Address) { } if h.EOM() { // Clear all higher bits. 
- for s := int(seq) + 1; s < MaxResponsePackets; s++ { + for s := int(seq) + 1; s < patp.MaxResponsePackets; s++ { t.bitmap &^= 1 << s } } @@ -520,14 +521,14 @@ func (e *Endpoint) handleTResp(h ATPHeader, data []byte, src Address) { } func (e *Endpoint) sendTRel(src, dst Address, tid uint16) { - h := ATPHeader{Control: TREL, TransID: tid} + h := patp.Header{Control: patp.TREL, TransID: tid} pkt := h.Marshal() _ = e.sender.Send(src, dst, pkt, nil) } // ----- Responder ---------------------------------------------------------- -func (e *Endpoint) handleTReq(h ATPHeader, data []byte, src, local Address, hint any) { +func (e *Endpoint) handleTReq(h patp.Header, data []byte, src, local Address, hint any) { if !e.admissible(src) { return } @@ -605,11 +606,11 @@ func (e *Endpoint) handleTReq(h ATPHeader, data []byte, src, local Address, hint bitmap := h.Bitmap reply := func(resp ResponseMessage) { replied.Do(func() { - if len(resp.Buffers) > MaxResponsePackets { + if len(resp.Buffers) > patp.MaxResponsePackets { return } for _, b := range resp.Buffers { - if len(b) > MaxATPData { + if len(b) > patp.MaxATPData { return } } @@ -654,15 +655,15 @@ func buildResponsePackets(tid uint16, resp ResponseMessage) []ResponsePacket { out := make([]ResponsePacket, len(resp.Buffers)) last := len(resp.Buffers) - 1 for i, data := range resp.Buffers { - ctrl := uint8(TRESP) + ctrl := uint8(patp.TRESP) if i == last { - ctrl |= EOM + ctrl |= patp.EOM } var ub uint32 if i < len(resp.UserBytes) { ub = resp.UserBytes[i] } - h := ATPHeader{Control: ctrl, Bitmap: uint8(i), TransID: tid, UserData: ub} + h := patp.Header{Control: ctrl, Bitmap: uint8(i), TransID: tid, UserData: ub} out[i] = ResponsePacket{ Header: h.Marshal(), Data: append([]byte(nil), data...), @@ -738,7 +739,7 @@ func (e *Endpoint) relaxResponderPacingLocked(dst Address) { p.interPacketDelay -= adaptivePacerRecoveryStep } -func (e *Endpoint) handleTRel(h ATPHeader, src Address) { +func (e *Endpoint) handleTRel(h patp.Header, src 
Address) { e.mu.Lock() key := rspKey{src: src, tid: h.TransID} r, ok := e.rspcbs[key] @@ -765,7 +766,7 @@ func (e *Endpoint) expireRspCB(r *rspcb) { // ----- helpers ------------------------------------------------------------ func fullBitmap(n int) uint8 { - if n >= MaxResponsePackets { + if n >= patp.MaxResponsePackets { return 0xFF } return (1 << uint(n)) - 1 diff --git a/service/atp/wire.go b/service/atp/wire.go new file mode 100644 index 0000000..5832a0c --- /dev/null +++ b/service/atp/wire.go @@ -0,0 +1,53 @@ +// Package atp wire-format re-exports. +// +// The wire format (header layout, control-bit constants, codec) lives in +// protocol/atp. This file re-exports those symbols under their historical +// names so the state-machine code in this package and its callers don't +// need to spell out an import alias for every reference. +package atp + +import ( + patp "github.com/pgodw/omnitalk/protocol/atp" +) + +// Header type. +type ATPHeader = patp.Header + +// Function-code helpers. +type FuncCode = patp.FuncCode + +const ( + FuncTReq = patp.FuncTReq + FuncTResp = patp.FuncTResp + FuncTRel = patp.FuncTRel +) + +// Control-byte bit masks. +const ( + TREQ = patp.TREQ + TRESP = patp.TRESP + TREL = patp.TREL + XO = patp.XO + EOM = patp.EOM + STS = patp.STS + FuncMask = patp.FuncMask +) + +// TRel timeout indicator. +type TRelTimeout = patp.TRelTimeout + +const ( + TRel30s = patp.TRel30s + TRel1m = patp.TRel1m + TRel2m = patp.TRel2m + TRel4m = patp.TRel4m + TRel8m = patp.TRel8m +) + +// Protocol limits and DDP type. 
+const ( + MaxResponsePackets = patp.MaxResponsePackets + MaxATPData = patp.MaxATPData + DDPTypeATP = patp.DDPType + ATPHeaderSize = patp.HeaderSize +) diff --git a/service/dsi/doc.go b/service/dsi/doc.go new file mode 100644 index 0000000..d040918 --- /dev/null +++ b/service/dsi/doc.go @@ -0,0 +1,8 @@ +//go:build afp || all + +// Package dsi implements the Data Stream Interface — Apple's TCP-based +// transport for AFP (Apple Filing Protocol) used by AFP-over-TCP/IP +// clients (Mac OS 9+ and later). +// +// See spec/12-dsi.md and Apple's AFP 3.x specification. +package dsi diff --git a/service/dsi/dsi.go b/service/dsi/dsi.go index e845aca..2c92cac 100644 --- a/service/dsi/dsi.go +++ b/service/dsi/dsi.go @@ -1,3 +1,5 @@ +//go:build afp || all + /* Package dsi implements the Data Stream Interface (DSI). @@ -9,15 +11,19 @@ Refer: AppleTalk Filing Protocol 2.1 & 2.2 / AFP over TCP/IP Specification. package dsi import ( + "context" "encoding/binary" "io" "net" + "sync" + + "github.com/pgodw/omnitalk/protocol/ddp" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" - "github.com/pgodw/omnitalk/go/service/afp" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/binutil" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/afp" ) // DSI Command Codes @@ -62,33 +68,48 @@ type Header struct { const HeaderSize = 16 -func (h *Header) Marshal() []byte { - b := make([]byte, HeaderSize) +// WireSize returns the fixed 16-byte DSI header size. +func (h *Header) WireSize() int { return HeaderSize } + +// MarshalWire encodes the header into b. 
+func (h *Header) MarshalWire(b []byte) (int, error) { + if len(b) < HeaderSize { + return 0, binutil.ErrShortBuffer + } b[0] = h.Flags b[1] = h.Command - binary.BigEndian.PutUint16(b[2:4], h.RequestID) - binary.BigEndian.PutUint32(b[4:8], h.ErrorOffset) - binary.BigEndian.PutUint32(b[8:12], h.DataLen) - binary.BigEndian.PutUint32(b[12:16], h.Reserved) - return b + _, _ = binutil.PutU16(b[2:], h.RequestID) + _, _ = binutil.PutU32(b[4:], h.ErrorOffset) + _, _ = binutil.PutU32(b[8:], h.DataLen) + _, _ = binutil.PutU32(b[12:], h.Reserved) + return HeaderSize, nil } -func (h *Header) Unmarshal(b []byte) error { +// UnmarshalWire decodes the header from b. +func (h *Header) UnmarshalWire(b []byte) (int, error) { if len(b) < HeaderSize { - return io.ErrUnexpectedEOF + return 0, binutil.ErrShortBuffer } h.Flags = b[0] h.Command = b[1] - h.RequestID = binary.BigEndian.Uint16(b[2:4]) - h.ErrorOffset = binary.BigEndian.Uint32(b[4:8]) - h.DataLen = binary.BigEndian.Uint32(b[8:12]) - h.Reserved = binary.BigEndian.Uint32(b[12:16]) - return nil + h.RequestID, _, _ = binutil.GetU16(b[2:]) + h.ErrorOffset, _, _ = binutil.GetU32(b[4:]) + h.DataLen, _, _ = binutil.GetU32(b[8:]) + h.Reserved, _, _ = binutil.GetU32(b[12:]) + return HeaderSize, nil } -type AFPVersion struct { - VersionName string - Version int +func (h *Header) Marshal() []byte { + b := make([]byte, HeaderSize) + _, _ = h.MarshalWire(b) + return b +} + +func (h *Header) Unmarshal(b []byte) error { + if _, err := h.UnmarshalWire(b); err != nil { + return io.ErrUnexpectedEOF + } + return nil } type Server struct { @@ -97,6 +118,12 @@ type Server struct { afpServer afp.CommandHandler listener net.Listener stop chan struct{} + wg sync.WaitGroup + + // connsMu protects conns. conns tracks every accepted client connection so + // Stop can force them closed and unblock any in-flight io.ReadFull calls. 
+ connsMu sync.Mutex + conns map[net.Conn]struct{} } func NewServer(serverName string, addr string, afpHandler afp.CommandHandler) *Server { @@ -105,7 +132,28 @@ func NewServer(serverName string, addr string, afpHandler afp.CommandHandler) *S addr: addr, afpServer: afpHandler, stop: make(chan struct{}), + conns: make(map[net.Conn]struct{}), + } +} + +// trackConn registers conn so Stop can close it. Returns false if the server +// is already stopping, in which case the caller must close conn itself. +func (s *Server) trackConn(conn net.Conn) bool { + s.connsMu.Lock() + defer s.connsMu.Unlock() + select { + case <-s.stop: + return false + default: } + s.conns[conn] = struct{}{} + return true +} + +func (s *Server) untrackConn(conn net.Conn) { + s.connsMu.Lock() + defer s.connsMu.Unlock() + delete(s.conns, conn) } // SetCommandHandler assigns the AFP command handler to this server. @@ -114,14 +162,16 @@ func (s *Server) SetCommandHandler(handler afp.CommandHandler) { } // Start implements afp.Transport. -func (s *Server) Start(router service.Router) error { +func (s *Server) Start(ctx context.Context, router service.Router) error { l, err := net.Listen("tcp", s.addr) if err != nil { return err } s.listener = l + s.wg.Add(1) go func() { + defer s.wg.Done() for { conn, err := s.listener.Accept() if err != nil { @@ -133,27 +183,48 @@ func (s *Server) Start(router service.Router) error { netlog.Debug("[DSI] accept error: %v", err) continue } + if !s.trackConn(conn) { + _ = conn.Close() + return + } netlog.Debug("[DSI] connection accepted from %s", conn.RemoteAddr()) - go s.handleConn(conn) + s.wg.Add(1) + go func(c net.Conn) { + defer s.wg.Done() + defer s.untrackConn(c) + s.handleConn(c) + }(conn) } }() return nil } -// Stop implements afp.Transport. +// Stop implements afp.Transport. Closes the listener and every active +// client connection so per-conn handlers blocked in io.ReadFull return, +// then waits for accept and per-conn goroutines to exit. 
func (s *Server) Stop() error { close(s.stop) if s.listener != nil { - return s.listener.Close() + _ = s.listener.Close() + } + s.connsMu.Lock() + for c := range s.conns { + _ = c.Close() } + s.connsMu.Unlock() + s.wg.Wait() return nil } // Inbound implements afp.Transport. -func (s *Server) Inbound(d appletalk.Datagram, p port.Port) { +func (s *Server) Inbound(d ddp.Datagram, p port.Port) { // DSI over TCP does not process DDP packets } +// MaxReadSize implements afp.Transport. DSI streams replies over TCP with no +// fixed per-reply quantum, so AFP should not cap reads on this transport. +func (s *Server) MaxReadSize() int { return 0 } + func (s *Server) ListenAndServe() error { l, err := net.Listen("tcp", s.addr) if err != nil { diff --git a/service/dsi/dsi_wire_test.go b/service/dsi/dsi_wire_test.go new file mode 100644 index 0000000..ba21465 --- /dev/null +++ b/service/dsi/dsi_wire_test.go @@ -0,0 +1,53 @@ +//go:build afp || all + +package dsi + +import ( + "bytes" + "testing" +) + +func TestDSIHeaderWireGolden(t *testing.T) { + t.Parallel() + h := Header{ + Flags: 0x01, + Command: 0x02, + RequestID: 0x1234, + ErrorOffset: 0xCAFEBABE, + DataLen: 0x000000F0, + Reserved: 0xDEADBEEF, + } + want := []byte{ + 0x01, 0x02, 0x12, 0x34, + 0xCA, 0xFE, 0xBA, 0xBE, + 0x00, 0x00, 0x00, 0xF0, + 0xDE, 0xAD, 0xBE, 0xEF, + } + + buf := make([]byte, h.WireSize()) + if _, err := h.MarshalWire(buf); err != nil { + t.Fatalf("MarshalWire: %v", err) + } + if !bytes.Equal(buf, want) { + t.Fatalf("MarshalWire = % x, want % x", buf, want) + } + + var out Header + if _, err := out.UnmarshalWire(buf); err != nil { + t.Fatalf("UnmarshalWire: %v", err) + } + if out != h { + t.Fatalf("round-trip mismatch: got %+v, want %+v", out, h) + } +} + +func TestDSIHeaderShortBuffer(t *testing.T) { + t.Parallel() + h := Header{} + if _, err := h.MarshalWire(make([]byte, 15)); err == nil { + t.Fatal("expected error on short marshal") + } + if _, err := h.UnmarshalWire(make([]byte, 15)); err == nil { + 
t.Fatal("expected error on short unmarshal") + } +} diff --git a/service/llap/doc.go b/service/llap/doc.go new file mode 100644 index 0000000..583a0af --- /dev/null +++ b/service/llap/doc.go @@ -0,0 +1,7 @@ +// Package llap implements the LocalTalk Link Access Protocol — the +// LocalTalk MAC layer responsible for ENQ/ACK node-address acquisition, +// RTS/CTS handshakes, and frame fragmentation/reassembly above raw +// LocalTalk transports. +// +// See spec/03-llap.md and Inside AppleTalk 2/e §1. +package llap diff --git a/service/llap/llap.go b/service/llap/llap.go index 409704c..617d2be 100644 --- a/service/llap/llap.go +++ b/service/llap/llap.go @@ -1,17 +1,19 @@ package llap import ( + "context" "fmt" "math/bits" "math/rand" "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/port/localtalk" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/port/localtalk" + "github.com/pgodw/omnitalk/service" ) const ( @@ -26,7 +28,7 @@ const ( type ddpInboundRouter interface { service.Router - Inbound(datagram appletalk.Datagram, rxPort port.Port) + Inbound(datagram ddp.Datagram, rxPort port.Port) } type Service struct { @@ -36,6 +38,10 @@ type Service struct { mu sync.Mutex ports map[*localtalk.Port]*portState rand *rand.Rand + + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc } type portState struct { @@ -56,19 +62,29 @@ type portState struct { } func New() *Service { + // Pre-arm a never-cancelled ctx so handlers reached before Start (in + // tests that exercise transmit paths directly) don't dereference nil. + // Start replaces this with a real ctx derived from its caller. 
+ ctx, cancel := context.WithCancel(context.Background()) return &Service{ - stop: make(chan struct{}), - ports: make(map[*localtalk.Port]*portState), - rand: rand.New(rand.NewSource(time.Now().UnixNano())), + stop: make(chan struct{}), + ports: make(map[*localtalk.Port]*portState), + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + ctx: ctx, + cancel: cancel, } } -func (s *Service) Start(router service.Router) error { +func (s *Service) Start(ctx context.Context, router service.Router) error { r, ok := router.(ddpInboundRouter) if !ok { return fmt.Errorf("llap: router does not support inbound datagram delivery") } s.router = r + if s.cancel != nil { + s.cancel() + } + s.ctx, s.cancel = context.WithCancel(ctx) s.mu.Lock() defer s.mu.Unlock() for _, st := range s.ports { @@ -79,15 +95,19 @@ func (s *Service) Start(router service.Router) error { func (s *Service) Stop() error { close(s.stop) + if s.cancel != nil { + s.cancel() + } s.mu.Lock() - defer s.mu.Unlock() for _, st := range s.ports { close(st.stop) } + s.mu.Unlock() + s.wg.Wait() return nil } -func (s *Service) Inbound(_ appletalk.Datagram, _ port.Port) {} +func (s *Service) Inbound(_ ddp.Datagram, _ port.Port) {} func (s *Service) RegisterPort(p *localtalk.Port) { s.mu.Lock() @@ -139,7 +159,7 @@ func (s *Service) InboundFrame(p *localtalk.Port, frame localtalk.LLAPFrame) { } } -func (s *Service) TransmitUnicast(p *localtalk.Port, network uint16, node uint8, d appletalk.Datagram) { +func (s *Service) TransmitUnicast(p *localtalk.Port, network uint16, node uint8, d ddp.Datagram) { if network != 0 && network != p.Network() { netlog.Debug("[LLAP] %s dropping unicast to network=%d local-network=%d", p.ShortString(), network, p.Network()) return @@ -168,7 +188,7 @@ func (s *Service) TransmitUnicast(p *localtalk.Port, network uint16, node uint8, } } -func (s *Service) TransmitBroadcast(p *localtalk.Port, d appletalk.Datagram) { +func (s *Service) TransmitBroadcast(p *localtalk.Port, d ddp.Datagram) { st := 
s.stateFor(p) if !st.isClaimed() { netlog.Debug("[LLAP] %s dropping broadcast while node is unclaimed", p.ShortString()) @@ -192,7 +212,11 @@ func (s *Service) startPortLocked(st *portState) { return } st.started = true - go s.acquireLoop(st) + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.acquireLoop(st) + }() } func (s *Service) acquireLoop(st *portState) { @@ -200,6 +224,8 @@ func (s *Service) acquireLoop(st *portState) { defer ticker.Stop() for { select { + case <-s.ctx.Done(): + return case <-s.stop: return case <-st.stop: @@ -315,14 +341,37 @@ func (s *Service) runDirectedTransmit(st *portState, frame localtalk.LLAPFrame) st.ctsCh = make(chan struct{}, 1) ctsCh := st.ctsCh st.mu.Unlock() + ctsTimer := time.NewTimer(ctsTimeout) select { case <-ctsCh: + ctsTimer.Stop() if err := s.sendFrame(st, frame); err != nil { return err } netlog.Debug("[LLAP] %s transmit success dst=%d attempt=%d local-backoff=%d", st.port.ShortString(), frame.DestinationNode, attempt, localBackoff) return nil - case <-time.After(ctsTimeout): + case <-st.stop: + ctsTimer.Stop() + st.mu.Lock() + st.expectCTSFrom = 0 + st.ctsCh = nil + st.mu.Unlock() + return fmt.Errorf("llap: port stopped during CTS wait") + case <-s.stop: + ctsTimer.Stop() + st.mu.Lock() + st.expectCTSFrom = 0 + st.ctsCh = nil + st.mu.Unlock() + return fmt.Errorf("llap: service stopped during CTS wait") + case <-s.ctx.Done(): + ctsTimer.Stop() + st.mu.Lock() + st.expectCTSFrom = 0 + st.ctsCh = nil + st.mu.Unlock() + return fmt.Errorf("llap: context cancelled during CTS wait: %w", s.ctx.Err()) + case <-ctsTimer.C: st.mu.Lock() st.collisionHistory |= 1 collisionHistory := st.collisionHistory diff --git a/service/llap/llap_test.go b/service/llap/llap_test.go index 8c94f88..137fd35 100644 --- a/service/llap/llap_test.go +++ b/service/llap/llap_test.go @@ -7,9 +7,10 @@ import ( "testing" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port/localtalk" 
+ "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port/localtalk" ) func TestDirectedTransmitLogsRetryAndBackoff(t *testing.T) { @@ -37,7 +38,7 @@ func TestDirectedTransmitLogsRetryAndBackoff(t *testing.T) { defer log.SetOutput(oldWriter) netlog.SetLevel(netlog.LevelDebug) - d, err := p.BuildDataFrame(0x22, appletalk.Datagram{ + d, err := p.BuildDataFrame(0x22, ddp.Datagram{ DestinationNetwork: 1, SourceNetwork: 1, DestinationNode: 0x22, @@ -89,7 +90,7 @@ func TestDatagramTransmitSkipsRTSCTSForSharedMedium(t *testing.T) { lastActivity: time.Now().Add(-time.Second), } - d, err := p.BuildDataFrame(0x22, appletalk.Datagram{ + d, err := p.BuildDataFrame(0x22, ddp.Datagram{ DestinationNetwork: 1, SourceNetwork: 1, DestinationNode: 0x22, diff --git a/service/macgarden/client.go b/service/macgarden/client.go new file mode 100644 index 0000000..d59fade --- /dev/null +++ b/service/macgarden/client.go @@ -0,0 +1,1088 @@ +package macgarden + +import ( + "bytes" + "context" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/PuerkitoBio/goquery" + "github.com/pgodw/omnitalk/netlog" +) + +const ( + BaseURL = "http://macintoshgarden.org" + headRequestTimeout = 1000 * time.Millisecond + + clientUserAgent = "Mozilla/2.0 (Macintosh; I; 68K)" + clientAccept = "image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, */*" +) + +type Category struct { + Name string + URL string +} + +type SearchResult struct { + Name string + URL string + Snippet string + Type string + UploadDate time.Time +} + +type DownloadLink struct { + Text string + URL string +} + +type DownloadDetails struct { + Title string + Size string + OS string + Links []DownloadLink +} + +type SoftwareItem struct { + Title string + URL string + Description string + Downloads 
[]DownloadDetails + Screenshots []string +} + +type CategoryPageInfo struct { + FirstPage []SearchResult + LastPage []SearchResult + FirstPageCount int + LastPageCount int + PageSize int + LastPageNumber int + TotalCount int +} + +type headCacheEntry struct { + size int64 + err error +} + +type Client struct { + httpClient *http.Client + allowedHost map[string]struct{} + rateLimiter <-chan time.Time + cacheDir string + fetchHead bool + maxRangeSize int // 0 = unlimited; capped per ReadURLRange call + headMu sync.RWMutex + headCache map[string]headCacheEntry + itemCacheMu sync.RWMutex + itemCache map[string]cachedItemDetails +} + +func (c *Client) SetFetchHead(v bool) { c.fetchHead = v } +func (c *Client) FetchHead() bool { return c.fetchHead } +func (c *Client) SetMaxRangeSize(n int) { c.maxRangeSize = n } +func (c *Client) MaxRangeSize() int { return c.maxRangeSize } + +type cachedItemDetails struct { + FetchedAt time.Time `json:"fetched_at"` + SoftwareItem *SoftwareItem `json:"software_item,omitempty"` + HeadResults map[string]int64 `json:"head_results,omitempty"` // fileURL -> size +} + +func NewClient() *Client { + jar, _ := cookiejar.New(nil) + ticker := time.NewTicker(1 * time.Second) + c := &Client{ + rateLimiter: ticker.C, + cacheDir: "._htmlcache", + headCache: make(map[string]headCacheEntry), + httpClient: &http.Client{ + Timeout: 10 * time.Second, + Jar: jar, + // Copy our standard headers onto every redirected request so the + // server sees a consistent client regardless of hop count. 
+ CheckRedirect: func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return fmt.Errorf("stopped after 10 redirects") + } + if len(via) > 0 { + for key, vals := range via[0].Header { + if _, ok := req.Header[key]; !ok { + req.Header[key] = vals + } + } + } + return nil + }, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + }, + allowedHost: map[string]struct{}{ + "macintoshgarden.org": {}, + "mirror.macintoshgarden.org": {}, + "download.macintoshgarden.org": {}, + "old.mac.gdn": {}, + }, + itemCache: make(map[string]cachedItemDetails), + } + c.loadItemCache() + return c +} + +// Prime establishes a session cookie by fetching the site index. Production +// callers invoke this once after construction; tests skip it so mock +// transports aren't perturbed by an unsolicited GET. +func (c *Client) Prime() { c.primeSession() } + +// primeSession fetches the site index so the server can set a session cookie. +// The cookie jar on httpClient stores it automatically; all subsequent requests +// (fetchDocument, ReadURLRange, FetchFull, rangeContentLength) send it back. +func (c *Client) primeSession() { + netlog.Info("[MacGarden] establishing session: GET %s", BaseURL) + req, err := http.NewRequest(http.MethodGet, BaseURL, nil) + if err != nil { + netlog.Warn("[MacGarden] session prime request error: %v", err) + return + } + c.setHeaders(req) + resp, err := c.httpClient.Do(req) // no rate-limit: one-time startup call + if err != nil { + netlog.Warn("[MacGarden] session prime failed: %v", err) + return + } + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + u, _ := url.Parse(BaseURL) + netlog.Info("[MacGarden] session established, %d cookie(s) stored", len(c.httpClient.Jar.Cookies(u))) +} + +// setHeaders stamps every outbound request with our standard browser identity. 
+func (c *Client) setHeaders(req *http.Request) { + req.Header.Set("User-Agent", clientUserAgent) + req.Header.Set("Accept", clientAccept) + req.Header.Set("Referer", BaseURL+"/") +} + +// throttledDo drains one rate-limiter token then executes the request. +// Every network call (except the startup session prime) must go through here. +func (c *Client) throttledDo(req *http.Request) (*http.Response, error) { + <-c.rateLimiter + return c.httpClient.Do(req) +} + +// getCachedHead returns a previously stored size from the in-memory head cache. +func (c *Client) getCachedHead(fileURL string) (int64, bool) { + c.headMu.RLock() + defer c.headMu.RUnlock() + if e, ok := c.headCache[fileURL]; ok { + return e.size, true + } + return 0, false +} + +// setCachedHead stores a size in the in-memory head cache. +func (c *Client) setCachedHead(fileURL string, size int64) { + c.headMu.Lock() + c.headCache[fileURL] = headCacheEntry{size: size} + c.headMu.Unlock() +} + +// lookupItemCacheHead checks the persistent item cache for a previously stored +// content-length, avoiding a network round-trip on repeated calls. +func (c *Client) lookupItemCacheHead(fileURL string) (int64, bool) { + c.itemCacheMu.RLock() + defer c.itemCacheMu.RUnlock() + for _, v := range c.itemCache { + if v.HeadResults != nil { + if sz, ok := v.HeadResults[fileURL]; ok { + return sz, true + } + } + } + return 0, false +} + +// recordHeadResult persists a content-length in the item cache and flushes to +// disk. It tries to attach the size to an existing item entry; otherwise it +// creates a stand-alone entry keyed by the file URL. 
+func (c *Client) recordHeadResult(fileURL string, size int64) { + c.itemCacheMu.Lock() + found := false + for k, v := range c.itemCache { + if k == fileURL || (v.SoftwareItem != nil && containsDownloadURL(v.SoftwareItem, fileURL)) { + if v.HeadResults == nil { + v.HeadResults = make(map[string]int64) + } + v.HeadResults[fileURL] = size + c.itemCache[k] = v + found = true + break + } + } + if !found { + c.itemCache[fileURL] = cachedItemDetails{ + FetchedAt: time.Now(), + HeadResults: map[string]int64{fileURL: size}, + } + } + c.itemCacheMu.Unlock() + c.saveItemCache() +} + +func (c *Client) itemCachePath() string { + return filepath.Join("._itemcache", "itemcache.json") +} + +func (c *Client) loadItemCache() { + c.itemCacheMu.Lock() + defer c.itemCacheMu.Unlock() + cachePath := c.itemCachePath() + body, err := os.ReadFile(cachePath) + if err != nil { + if os.IsNotExist(err) { + c.itemCache = make(map[string]cachedItemDetails) + return + } + return + } + tmp := make(map[string]cachedItemDetails) + if err := json.Unmarshal(body, &tmp); err == nil { + c.itemCache = tmp + } +} + +func (c *Client) saveItemCache() { + c.itemCacheMu.RLock() + defer c.itemCacheMu.RUnlock() + cachePath := c.itemCachePath() + cacheDir := filepath.Dir(cachePath) + _ = os.MkdirAll(cacheDir, 0o755) + body, err := json.MarshalIndent(c.itemCache, "", " ") + if err != nil { + return + } + tmpPath := cachePath + ".tmp" + if err := os.WriteFile(tmpPath, body, 0o644); err != nil { + return + } + _ = os.Rename(tmpPath, cachePath) +} + +func (c *Client) GetCategories() ([]Category, error) { + netlog.Info("[MacGarden] fetching categories from %s", BaseURL) + doc, err := c.fetchDocument(BaseURL) + if err != nil { + netlog.Warn("[MacGarden] failed to fetch categories: %v", err) + return nil, err + } + return c.parseCategoriesFromDocument(doc), nil +} + +func (c *Client) parseCategoriesFromDocument(doc *goquery.Document) []Category { + seen := map[string]struct{}{} + result := make([]Category, 0, 64) + 
addCategory := func(name string, href string) { + name = strings.TrimSpace(name) + if name == "" { + return + } + u := c.normalizeURL(href) + if u == "" { + return + } + key := strings.ToLower(name) + "|" + u + if _, exists := seen[key]; exists { + return + } + seen[key] = struct{}{} + result = append(result, Category{Name: name, URL: u}) + } + + // Legacy selector used by older Macintosh Garden markup. + doc.Find("a[href*='/category/']").Each(func(_ int, s *goquery.Selection) { + href, _ := s.Attr("href") + addCategory(s.Text(), href) + }) + + // Modern navigation includes taxonomy paths under /games and /apps. + if len(result) == 0 { + doc.Find("a[href^='/games/'], a[href^='/apps/']").Each(func(_ int, s *goquery.Selection) { + href, ok := s.Attr("href") + if !ok { + return + } + href = strings.TrimSpace(href) + if href == "/games/all" || href == "/apps/all" { + return + } + name := strings.TrimSpace(s.Text()) + if name == "" { + name = strings.Trim(strings.TrimPrefix(href, "/games/"), "/") + if name == href { + name = strings.Trim(strings.TrimPrefix(href, "/apps/"), "/") + } + name = strings.ReplaceAll(name, "-", " ") + } + addCategory(name, href) + }) + } + return result +} + +func (c *Client) Search(query string, limit int) ([]SearchResult, error) { + if strings.TrimSpace(query) == "" { + return nil, nil + } + + query = strings.TrimSpace(query) + var searchURL string + isDirectURL := false + + // If query looks like a URL (absolute or category path), fetch it directly + if strings.HasPrefix(query, "http://") || strings.HasPrefix(query, "https://") || strings.HasPrefix(query, "/apps/") || strings.HasPrefix(query, "/games/") { + isDirectURL = true + if strings.HasPrefix(query, "http://") || strings.HasPrefix(query, "https://") { + searchURL = query + } else { + searchURL = BaseURL + query + } + } else { + // Regular search query + searchURL = fmt.Sprintf("%s/search/node/%s", BaseURL, url.PathEscape(query+" type:app,game")) + } + + netlog.Info("[MacGarden] 
searching URL: %s", searchURL) + doc, err := c.fetchDocument(searchURL) + if err != nil { + netlog.Warn("[MacGarden] search failed: %v", err) + return nil, err + } + if isDirectURL { + return c.parseCategoryResults(searchURL, doc, limit) + } + + searchBaseURL, err := url.Parse(searchURL) + if err != nil { + return c.parseSearchResults(doc, limit), nil + } + results := c.parseSearchResults(doc, 0) + for _, pageURL := range c.categoryPaginationURLs(searchBaseURL.Path, doc) { + if limit > 0 && len(results) >= limit { + break + } + pageDoc, err := c.fetchDocument(pageURL) + if err != nil { + netlog.Warn("[MacGarden] search page fetch failed: %v", err) + return nil, err + } + results = append(results, c.parseSearchResults(pageDoc, 0)...) + } + if limit > 0 && len(results) > limit { + results = results[:limit] + } + return results, nil +} + +func (c *Client) parseSearchResults(doc *goquery.Document, limit int) []SearchResult { + titleNodes := doc.Find("#paper > div.box > div > dl > dt.title a") + snippetNodes := doc.Find("dd .search-snippet") + infoNodes := doc.Find("dd .search-info") + count := titleNodes.Length() + if snippetNodes.Length() < count { + count = snippetNodes.Length() + } + if limit > 0 && count > limit { + count = limit + } + results := make([]SearchResult, 0, count) + for i := 0; i < count; i++ { + titleSel := titleNodes.Eq(i) + snippetSel := snippetNodes.Eq(i) + href, ok := titleSel.Attr("href") + if !ok { + continue + } + resultType := "" + uploadDate := time.Time{} + if i < infoNodes.Length() { + resultType, uploadDate = parseSearchInfo(strings.TrimSpace(infoNodes.Eq(i).Text())) + } + results = append(results, SearchResult{ + Name: strings.TrimSpace(titleSel.Text()), + URL: c.normalizeURL(href), + Snippet: strings.TrimSpace(snippetSel.Text()), + Type: resultType, + UploadDate: uploadDate, + }) + } + return results +} + +// parseSearchInfo parses "Type - User - Date - Time - N comments" from search-info. 
+// We currently care only about Type (App/Game) and upload timestamp. +func parseSearchInfo(info string) (string, time.Time) { + parts := strings.Split(info, " - ") + if len(parts) < 4 { + return "", time.Time{} + } + resultType := strings.TrimSpace(parts[0]) + if resultType != "App" && resultType != "Game" { + resultType = "" + } + + datePart := strings.TrimSpace(parts[2]) + timePart := strings.ToLower(strings.TrimSpace(parts[3])) + ts := strings.TrimSpace(datePart + " " + timePart) + if ts == "" { + return resultType, time.Time{} + } + for _, layout := range []string{"2006 Jan 2 3:04pm", "2006 Jan 2 03:04pm"} { + if t, err := time.ParseInLocation(layout, ts, time.Local); err == nil { + return resultType, t + } + } + return resultType, time.Time{} +} + +func (c *Client) parseCategoryResults(categoryURL string, doc *goquery.Document, limit int) ([]SearchResult, error) { + baseURL, err := url.Parse(categoryURL) + if err != nil { + return nil, err + } + seen := map[string]struct{}{} + results := c.appendCategoryResults(nil, seen, baseURL.Path, doc) + + for _, pageURL := range c.categoryPaginationURLs(baseURL.Path, doc) { + if limit > 0 && len(results) >= limit { + break + } + pageDoc, err := c.fetchDocument(pageURL) + if err != nil { + netlog.Warn("[MacGarden] category page fetch failed: %v", err) + return nil, err + } + results = c.appendCategoryResults(results, seen, baseURL.Path, pageDoc) + } + + if limit > 0 && len(results) > limit { + results = results[:limit] + } + return results, nil +} + +func (c *Client) GetCategoryPageInfo(categoryURL string) (CategoryPageInfo, error) { + doc, err := c.fetchDocument(categoryURL) + if err != nil { + return CategoryPageInfo{}, err + } + baseURL, err := url.Parse(categoryURL) + if err != nil { + return CategoryPageInfo{}, err + } + categoryPath := baseURL.Path + firstPage := c.appendCategoryResults(nil, map[string]struct{}{}, categoryPath, doc) + firstPageCount := len(firstPage) + pageURLs := 
c.categoryPaginationURLs(categoryPath, doc) + if len(pageURLs) == 0 { + return CategoryPageInfo{ + FirstPage: firstPage, + LastPage: firstPage, + FirstPageCount: firstPageCount, + LastPageCount: firstPageCount, + PageSize: firstPageCount, + LastPageNumber: 0, + TotalCount: firstPageCount, + }, nil + } + + lastPageURL := pageURLs[len(pageURLs)-1] + lastPageNumber := categoryPageNumber(lastPageURL) + if lastPageNumber <= 0 { + return CategoryPageInfo{ + FirstPage: firstPage, + LastPage: firstPage, + FirstPageCount: firstPageCount, + LastPageCount: firstPageCount, + PageSize: firstPageCount, + LastPageNumber: 0, + TotalCount: firstPageCount, + }, nil + } + + lastDoc, err := c.fetchDocument(lastPageURL) + if err != nil { + return CategoryPageInfo{}, err + } + lastPage := c.appendCategoryResults(nil, map[string]struct{}{}, categoryPath, lastDoc) + lastPageCount := len(lastPage) + // Pagination is zero-based: the root category/search page is logical page 0, + // so a last page query of ?page=1 means there are two pages total. + pageCount := 1 + lastPageNumber + return CategoryPageInfo{ + FirstPage: firstPage, + LastPage: lastPage, + FirstPageCount: firstPageCount, + LastPageCount: lastPageCount, + PageSize: firstPageCount, + LastPageNumber: lastPageNumber, + TotalCount: firstPageCount*(pageCount-1) + lastPageCount, + }, nil +} + +func (c *Client) CountCategoryItems(categoryURL string) (int, error) { + info, err := c.GetCategoryPageInfo(categoryURL) + if err != nil { + return 0, err + } + return info.TotalCount, nil +} + +// GetSearchPage fetches a single page of text-search results for query. +// pageNumber 0 is the first (unparameterized) page; subsequent pages use ?page=N. 
+func (c *Client) GetSearchPage(query string, pageNumber int) ([]SearchResult, error) { + query = strings.TrimSpace(query) + if query == "" { + return nil, nil + } + searchURL := fmt.Sprintf("%s/search/node/%s", BaseURL, url.PathEscape(query+" type:app,game")) + if pageNumber > 0 { + u, err := url.Parse(searchURL) + if err != nil { + return nil, err + } + q := u.Query() + q.Set("page", strconv.Itoa(pageNumber)) + u.RawQuery = q.Encode() + searchURL = u.String() + } + netlog.Info("[MacGarden] fetching search page %d: %s", pageNumber, searchURL) + doc, err := c.fetchDocument(searchURL) + if err != nil { + return nil, err + } + return c.parseSearchResults(doc, 0), nil +} + +func (c *Client) GetCategoryPage(categoryURL string, pageNumber int) ([]SearchResult, error) { + pageURL, err := categoryPageURL(categoryURL, pageNumber) + if err != nil { + return nil, err + } + doc, err := c.fetchDocument(pageURL) + if err != nil { + return nil, err + } + baseURL, err := url.Parse(categoryURL) + if err != nil { + return nil, err + } + return c.appendCategoryResults(nil, map[string]struct{}{}, baseURL.Path, doc), nil +} + +func (c *Client) appendCategoryResults(results []SearchResult, seen map[string]struct{}, categoryPath string, doc *goquery.Document) []SearchResult { + doc.Find("h2 a[href]").Each(func(_ int, s *goquery.Selection) { + href, ok := s.Attr("href") + if !ok { + return + } + normalized := c.normalizeURL(href) + if normalized == "" { + return + } + u, err := url.Parse(normalized) + if err != nil { + return + } + if u.Path == categoryPath || strings.Contains(u.RawQuery, "page=") { + return + } + key := strings.ToLower(normalized) + if _, exists := seen[key]; exists { + return + } + seen[key] = struct{}{} + results = append(results, SearchResult{ + Name: strings.TrimSpace(s.Text()), + URL: normalized, + }) + }) + return results +} + +func (c *Client) countCategoryResultsOnPage(categoryPath string, doc *goquery.Document) int { + count := 0 + doc.Find("h2 
a[href]").Each(func(_ int, s *goquery.Selection) { + href, ok := s.Attr("href") + if !ok { + return + } + normalized := c.normalizeURL(href) + if normalized == "" { + return + } + u, err := url.Parse(normalized) + if err != nil { + return + } + if u.Path == categoryPath || strings.Contains(u.RawQuery, "page=") { + return + } + count++ + }) + return count +} + +func (c *Client) categoryPaginationURLs(categoryPath string, doc *goquery.Document) []string { + pages := map[string]struct{}{} + urls := make([]string, 0, 4) + doc.Find("a[href]").Each(func(_ int, s *goquery.Selection) { + href, ok := s.Attr("href") + if !ok { + return + } + normalized := c.normalizeURL(href) + if normalized == "" { + return + } + u, err := url.Parse(normalized) + if err != nil { + return + } + if u.Path != categoryPath || !strings.Contains(u.RawQuery, "page=") { + return + } + if _, exists := pages[normalized]; exists { + return + } + pages[normalized] = struct{}{} + urls = append(urls, normalized) + }) + sort.Slice(urls, func(i, j int) bool { + return categoryPageNumber(urls[i]) < categoryPageNumber(urls[j]) + }) + return urls +} + +func categoryPageNumber(raw string) int { + u, err := url.Parse(raw) + if err != nil { + return 0 + } + page := u.Query().Get("page") + if page == "" { + return 0 + } + var n int + _, _ = fmt.Sscanf(page, "%d", &n) + return n +} + +func categoryPageURL(categoryURL string, pageNumber int) (string, error) { + u, err := url.Parse(categoryURL) + if err != nil { + return "", err + } + if pageNumber <= 0 { + u.RawQuery = "" + return u.String(), nil + } + query := u.Query() + query.Set("page", fmt.Sprintf("%d", pageNumber)) + u.RawQuery = query.Encode() + return u.String(), nil +} + +func (c *Client) GetSoftwareItem(itemURL string) (*SoftwareItem, error) { + c.itemCacheMu.RLock() + ci, ok := c.itemCache[itemURL] + c.itemCacheMu.RUnlock() + if ok && ci.SoftwareItem != nil { + netlog.Debug("[MacGarden] item cache hit: %s", itemURL) + return ci.SoftwareItem, nil + } + 
netlog.Info("[MacGarden] fetching item: %s", itemURL) + doc, err := c.fetchDocument(itemURL) + if err != nil { + netlog.Warn("[MacGarden] failed to fetch item: %v", err) + return nil, err + } + netlog.Debug("[MacGarden] received page for item: %s", itemURL) + item := &SoftwareItem{URL: itemURL} + item.Title = strings.TrimSpace(doc.Find("#paper > h1").First().Text()) + if item.Title == "" { + item.Title = strings.TrimSpace(doc.Find("h1").First().Text()) + } + descParts := make([]string, 0, 8) + doc.Find("#paper > p").Each(func(_ int, s *goquery.Selection) { + text := strings.TrimSpace(s.Text()) + if text != "" { + descParts = append(descParts, text) + } + }) + item.Description = strings.Join(descParts, "\n\n") + doc.Find("#paper > div.game-preview > div.images a.thickbox").Each(func(_ int, s *goquery.Selection) { + href, ok := s.Attr("href") + if !ok { + return + } + u := c.normalizeURL(href) + if u != "" { + item.Screenshots = append(item.Screenshots, u) + } + }) + doc.Find("#paper > div.game-preview > div.descr .note.download").Each(func(_ int, s *goquery.Selection) { + firstAnchor := s.Find("a").First() + if strings.EqualFold(strings.TrimSpace(firstAnchor.Text()), "Purchase") { + return + } + details := DownloadDetails{} + title := strings.TrimSpace(s.Find("br + small").First().Contents().First().Text()) + details.Title = title + details.Size = strings.TrimSpace(strings.TrimPrefix(s.Find("br + small > i").First().Text(), "(")) + details.OS = strings.TrimSpace(s.Contents().Last().Text()) + s.Find("a").Each(func(_ int, a *goquery.Selection) { + href, ok := a.Attr("href") + if !ok { + return + } + u := c.normalizeURL(href) + if u == "" { + return + } + details.Links = append(details.Links, DownloadLink{Text: strings.TrimSpace(a.Text()), URL: u}) + }) + if len(details.Links) > 0 { + item.Downloads = append(item.Downloads, details) + } + }) + netlog.Info("[MacGarden] parsed item %q: %d screenshot(s), %d download group(s)", item.Title, len(item.Screenshots), 
len(item.Downloads)) + // Save to cache + c.itemCacheMu.Lock() + c.itemCache[itemURL] = cachedItemDetails{ + FetchedAt: time.Now(), + SoftwareItem: item, + } + c.itemCacheMu.Unlock() + c.saveItemCache() + return item, nil +} + +func (c *Client) ReadURLRange(fileURL string, offset int64, length int) ([]byte, error) { + if c.maxRangeSize > 0 && length > c.maxRangeSize { + length = c.maxRangeSize + } + rng := "" + if length > 0 { + rng = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1) + } + netlog.Info("[MacGarden] reading URL: %s range=%s", fileURL, rng) + req, err := http.NewRequest(http.MethodGet, fileURL, nil) + if err != nil { + return nil, err + } + if length > 0 { + req.Header.Set("Range", rng) + } + c.setHeaders(req) + resp, err := c.throttledDo(req) + if err != nil { + netlog.Warn("[MacGarden] failed to read URL: %v", err) + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + _, _ = io.Copy(io.Discard, resp.Body) + return nil, fmt.Errorf("unexpected status %d", resp.StatusCode) + } + return io.ReadAll(resp.Body) +} + +// CachedContentLength returns a previously stored size without any network I/O. +func (c *Client) CachedContentLength(fileURL string) (int64, bool) { + if sz, ok := c.lookupItemCacheHead(fileURL); ok { + return sz, true + } + return c.getCachedHead(fileURL) +} + +// FetchFull downloads the complete content of fileURL and returns the bytes. 
+func (c *Client) FetchFull(fileURL string) ([]byte, error) { + netlog.Info("[MacGarden] full fetch: %s", fileURL) + req, err := http.NewRequest(http.MethodGet, fileURL, nil) + if err != nil { + return nil, err + } + c.setHeaders(req) + resp, err := c.throttledDo(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + _, _ = io.Copy(io.Discard, resp.Body) + return nil, fmt.Errorf("unexpected status %d", resp.StatusCode) + } + return io.ReadAll(resp.Body) +} + +// GetContentLength returns the file size via a ranged GET, using both caches +// so repeated calls are free. Called during FPGetFileDirParms. +func (c *Client) GetContentLength(fileURL string) (int64, error) { + if sz, ok := c.getCachedHead(fileURL); ok { + return sz, nil + } + if sz, ok := c.lookupItemCacheHead(fileURL); ok { + return sz, nil + } + size, err := c.rangeContentLength(fileURL) + c.setCachedHead(fileURL, size) + return size, err +} + +func (c *Client) HeadContentLength(fileURL string) (int64, error) { + if !c.fetchHead { + return 0, nil + } + if sz, ok := c.lookupItemCacheHead(fileURL); ok { + return sz, nil + } + if sz, ok := c.getCachedHead(fileURL); ok { + return sz, nil + } + u, err := url.Parse(fileURL) + if err != nil { + c.setCachedHead(fileURL, 0) + return 0, err + } + if _, ok := c.allowedHost[strings.ToLower(u.Host)]; !ok { + c.setCachedHead(fileURL, 0) + return 0, nil + } + // download.macintoshgarden.org often rejects HEAD; use a ranged GET instead. 
+ if strings.EqualFold(u.Host, "download.macintoshgarden.org") { + size, err := c.rangeContentLength(fileURL) + c.setCachedHead(fileURL, size) + c.recordHeadResult(fileURL, size) + return size, err + } + ctx, cancel := context.WithTimeout(context.Background(), headRequestTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, http.MethodHead, fileURL, nil) + if err != nil { + c.setCachedHead(fileURL, 0) + return 0, err + } + c.setHeaders(req) + netlog.Info("[MacGarden] HEAD request: %s", fileURL) + resp, err := c.throttledDo(req) + if err != nil { + netlog.Warn("[MacGarden] HEAD request failed: %v", err) + c.setCachedHead(fileURL, 0) + return 0, err + } + defer resp.Body.Close() + if resp.ContentLength >= 0 { + c.setCachedHead(fileURL, resp.ContentLength) + c.recordHeadResult(fileURL, resp.ContentLength) + return resp.ContentLength, nil + } + // Some hosts omit Content-Length on HEAD; fall back to a ranged GET. + size, rerr := c.rangeContentLength(fileURL) + if rerr == nil { + c.setCachedHead(fileURL, size) + c.recordHeadResult(fileURL, size) + return size, nil + } + c.setCachedHead(fileURL, 0) + return 0, nil +} + +func containsDownloadURL(item *SoftwareItem, fileURL string) bool { + if item == nil { + return false + } + for _, d := range item.Downloads { + for _, l := range d.Links { + if l.URL == fileURL { + return true + } + } + } + return false +} + +func (c *Client) rangeContentLength(fileURL string) (int64, error) { + netlog.Info("[MacGarden] ranged-size probe: %s", fileURL) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fileURL, nil) + if err != nil { + return 0, err + } + req.Header.Set("Range", "bytes=0-0") + c.setHeaders(req) + resp, err := c.throttledDo(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + if cr := strings.TrimSpace(resp.Header.Get("Content-Range")); cr != "" { + if slash := strings.LastIndex(cr, 
"/"); slash >= 0 && slash+1 < len(cr) { + total := strings.TrimSpace(cr[slash+1:]) + if total != "*" { + if n, perr := strconv.ParseInt(total, 10, 64); perr == nil && n >= 0 { + return n, nil + } + } + } + } + if resp.ContentLength >= 0 { + return resp.ContentLength, nil + } + return 0, fmt.Errorf("no size headers") +} + +func (c *Client) fetchDocument(urlStr string) (*goquery.Document, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + if _, ok := c.allowedHost[strings.ToLower(u.Host)]; !ok { + return nil, fmt.Errorf("host not allowed: %s", u.Host) + } + + if doc, ok, err := c.readDocumentFromCache(urlStr); err == nil && ok { + netlog.Debug("[MacGarden] cache hit: %s", urlStr) + return doc, nil + } else if err != nil { + netlog.Warn("[MacGarden] cache read failed for %s: %v", urlStr, err) + } + + netlog.Debug("[MacGarden] fetching document: %s", urlStr) + req, err := http.NewRequest(http.MethodGet, urlStr, nil) + if err != nil { + return nil, err + } + c.setHeaders(req) + resp, err := c.throttledDo(req) + if err != nil { + netlog.Warn("[MacGarden] HTTP request failed (%s): %v", urlStr, err) + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + _, _ = io.Copy(io.Discard, resp.Body) + return nil, fmt.Errorf("unexpected status %d", resp.StatusCode) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if err := c.writeDocumentToCache(urlStr, body); err != nil { + netlog.Warn("[MacGarden] cache write failed for %s: %v", urlStr, err) + } + return goquery.NewDocumentFromReader(bytes.NewReader(body)) +} + +func (c *Client) readDocumentFromCache(urlStr string) (*goquery.Document, bool, error) { + cachePath := c.cachePathForURL(urlStr) + body, err := os.ReadFile(cachePath) + if err != nil { + if os.IsNotExist(err) { + return nil, false, nil + } + return nil, false, err + } + doc, err := goquery.NewDocumentFromReader(bytes.NewReader(body)) + if err != nil { + _ = 
os.Remove(cachePath) + return nil, false, err + } + return doc, true, nil +} + +func (c *Client) writeDocumentToCache(urlStr string, body []byte) error { + cachePath := c.cachePathForURL(urlStr) + cacheDir := filepath.Dir(cachePath) + if err := os.MkdirAll(cacheDir, 0o755); err != nil { + return err + } + tmpPath := cachePath + ".tmp" + if err := os.WriteFile(tmpPath, body, 0o644); err != nil { + return err + } + if err := os.Rename(tmpPath, cachePath); err != nil { + _ = os.Remove(cachePath) + if retryErr := os.Rename(tmpPath, cachePath); retryErr != nil { + _ = os.Remove(tmpPath) + return retryErr + } + } + return nil +} + +func (c *Client) cachePathForURL(urlStr string) string { + sum := sha1.Sum([]byte(strings.TrimSpace(urlStr))) + file := hex.EncodeToString(sum[:]) + ".html" + cacheDir := c.cacheDir + if strings.TrimSpace(cacheDir) == "" { + cacheDir = "._htmlcache" + } + return filepath.Join(cacheDir, file) +} + +func (c *Client) normalizeURL(raw string) string { + raw = strings.TrimSpace(raw) + if raw == "" { + return "" + } + u, err := url.Parse(raw) + if err != nil { + return "" + } + if !u.IsAbs() { + // Protocol-relative URL (e.g. //old.mac.gdn/path) — supply https scheme. + if strings.HasPrefix(raw, "//") { + u, err = url.Parse("http:" + raw) + } else { + u, err = url.Parse(BaseURL + "/" + strings.TrimLeft(raw, "/")) + } + if err != nil { + return "" + } + } + if _, ok := c.allowedHost[strings.ToLower(u.Host)]; !ok { + return "" + } + u.Fragment = "" + return u.String() +} + +func FileNameFromURL(fileURL string, fallback string) string { + u, err := url.Parse(fileURL) + if err != nil { + return fallback + } + base := path.Base(u.Path) + if base == "." 
|| base == "/" || base == "" { + return fallback + } + return base +} diff --git a/service/macgarden/client_test.go b/service/macgarden/client_test.go new file mode 100644 index 0000000..9f46862 --- /dev/null +++ b/service/macgarden/client_test.go @@ -0,0 +1,513 @@ +package macgarden + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/PuerkitoBio/goquery" +) + +// requireLiveTests skips tests that reach the public Macintosh Garden site +// unless OMNITALK_LIVE_TESTS=1 is set. CI runners do not run these. +func requireLiveTests(t *testing.T) { + t.Helper() + if os.Getenv("OMNITALK_LIVE_TESTS") != "1" { + t.Skip("skipping live macintoshgarden.org test; set OMNITALK_LIVE_TESTS=1 to enable") + } +} + +type headErrorRoundTripper struct { + hits int +} + +func (rt *headErrorRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if req.Method == http.MethodHead { + rt.hits++ + return nil, errors.New("head failed") + } + return nil, errors.New("unexpected method") +} + +type probeRoundTripper struct { + headHits int + getHits int + rangeSeen string + mode string +} + +func (rt *probeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + switch req.Method { + case http.MethodHead: + rt.headHits++ + if rt.mode == "head-no-length" { + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader("")), Header: make(http.Header), ContentLength: -1}, nil + } + return nil, errors.New("unexpected HEAD") + case http.MethodGet: + rt.getHits++ + rt.rangeSeen = req.Header.Get("Range") + if rt.rangeSeen != "bytes=0-0" { + return nil, errors.New("missing range header") + } + resp := &http.Response{StatusCode: http.StatusPartialContent, Body: io.NopCloser(strings.NewReader("x")), Header: make(http.Header), ContentLength: 1} + resp.Header.Set("Content-Range", "bytes 0-0/12345") + return resp, nil + default: + return nil, 
errors.New("unexpected method") + } +} + +func readyRateLimiter() <-chan time.Time { + ch := make(chan time.Time, 32) + for i := 0; i < cap(ch); i++ { + ch <- time.Now() + } + return ch +} + +func TestParseCategoriesFromDocument_ModernNavFallback(t *testing.T) { + html := ` + + Games + Apps + Strategy + Compression & Archiving + ` + doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) + if err != nil { + t.Fatalf("NewDocumentFromReader: %v", err) + } + + c := NewClient() + c.rateLimiter = readyRateLimiter() + cats := c.parseCategoriesFromDocument(doc) + if len(cats) != 2 { + t.Fatalf("expected 2 categories from fallback parse, got %d", len(cats)) + } + if cats[0].URL == "" || cats[1].URL == "" { + t.Fatal("expected normalized URLs for parsed categories") + } +} + +func TestParseSearchResults_ExtractsTypeAndUploadDate(t *testing.T) { + html := ` + +
+
ClarisWorks 4.0
+
+

Snippet text

+

App - MikeTomTom - 2025 Jul 24 - 5:53pm - 8 comments

+
+
+ ` + doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) + if err != nil { + t.Fatalf("NewDocumentFromReader: %v", err) + } + + c := NewClient() + c.rateLimiter = readyRateLimiter() + results := c.parseSearchResults(doc, 0) + if len(results) != 1 { + t.Fatalf("len(results) = %d, want 1", len(results)) + } + if results[0].Type != "App" { + t.Fatalf("Type = %q, want App", results[0].Type) + } + if results[0].UploadDate.IsZero() { + t.Fatal("UploadDate is zero, want parsed timestamp") + } + if got := results[0].UploadDate.Format("2006-01-02 15:04"); got != "2025-07-24 17:53" { + t.Fatalf("UploadDate = %q, want %q", got, "2025-07-24 17:53") + } +} + +func TestParseCategoryResults_FromCategoryPage(t *testing.T) { + requireLiveTests(t) + html := ` + +

Anti-Virus Boot Disk

+

ClamAV upgrade for Leopard Server

+

Antivirus

+ ` + doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) + if err != nil { + t.Fatalf("NewDocumentFromReader: %v", err) + } + + c := NewClient() + c.rateLimiter = readyRateLimiter() + results, err := c.parseCategoryResults("https://macintoshgarden.org/apps/utilities/antivirus", doc, 0) + if err != nil { + t.Fatalf("parseCategoryResults: %v", err) + } + if len(results) != 2 { + t.Fatalf("expected 2 item results, got %d", len(results)) + } + if results[0].Name != "Anti-Virus Boot Disk" { + t.Fatalf("first result name = %q", results[0].Name) + } + if results[1].URL != "https://macintoshgarden.org/apps/clamav-upgrade-leopard-server" { + t.Fatalf("second result URL = %q", results[1].URL) + } +} + +func TestParseCategoryResults_FollowsPagination(t *testing.T) { + pages := map[string]string{} + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.URL.Path + if r.URL.RawQuery != "" { + key += "?" + r.URL.RawQuery + } + body, ok := pages[key] + if !ok { + http.NotFound(w, r) + return + } + _, _ = fmt.Fprint(w, body) + })) + defer server.Close() + pages["/apps/utilities/antivirus"] = fmt.Sprintf(` + +

Anti-Virus Boot Disk

+ 1 + 2 + `, server.URL, server.URL, server.URL) + pages["/apps/utilities/antivirus?page=1"] = fmt.Sprintf(` + +

ClamAV upgrade for Leopard Server

+ `, server.URL) + pages["/apps/utilities/antivirus?page=2"] = fmt.Sprintf(` + +

SecureInit

+ `, server.URL) + + c := NewClient() + c.httpClient = server.Client() + c.rateLimiter = readyRateLimiter() + host := strings.TrimPrefix(server.URL, "https://") + c.allowedHost = map[string]struct{}{host: struct{}{}} + + doc, err := c.fetchDocument(server.URL + "/apps/utilities/antivirus") + if err != nil { + t.Fatalf("fetchDocument: %v", err) + } + results, err := c.parseCategoryResults(server.URL+"/apps/utilities/antivirus", doc, 0) + if err != nil { + t.Fatalf("parseCategoryResults: %v", err) + } + if len(results) != 3 { + t.Fatalf("expected 3 paginated results, got %d", len(results)) + } + if results[2].Name != "SecureInit" { + t.Fatalf("last result name = %q", results[2].Name) + } +} + +func TestCountCategoryItems_UsesFirstAndLastPages(t *testing.T) { + pages := map[string]string{} + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.URL.Path + if r.URL.RawQuery != "" { + key += "?" + r.URL.RawQuery + } + body, ok := pages[key] + if !ok { + http.NotFound(w, r) + return + } + _, _ = fmt.Fprint(w, body) + })) + defer server.Close() + pages["/apps/utilities/antivirus"] = fmt.Sprintf(` + +

Anti-Virus Boot Disk

+

ClamAV upgrade for Leopard Server

+ 1 + 2 + last » + `, server.URL, server.URL, server.URL, server.URL, server.URL) + pages["/apps/utilities/antivirus?page=2"] = fmt.Sprintf(` + +

SecureInit

+ `, server.URL) + + c := NewClient() + c.httpClient = server.Client() + c.rateLimiter = readyRateLimiter() + host := strings.TrimPrefix(server.URL, "https://") + c.allowedHost = map[string]struct{}{host: struct{}{}} + + count, err := c.CountCategoryItems(server.URL + "/apps/utilities/antivirus") + if err != nil { + t.Fatalf("CountCategoryItems: %v", err) + } + if count != 5 { + t.Fatalf("count = %d, want 5", count) + } +} + +func TestGetCategoryPageInfo_UsesFirstAndLastPages(t *testing.T) { + pages := map[string]string{} + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.URL.Path + if r.URL.RawQuery != "" { + key += "?" + r.URL.RawQuery + } + body, ok := pages[key] + if !ok { + http.NotFound(w, r) + return + } + _, _ = fmt.Fprint(w, body) + })) + defer server.Close() + pages["/apps/utilities/antivirus"] = fmt.Sprintf(` + +

Anti-Virus Boot Disk

+

ClamAV upgrade for Leopard Server

+ 1 + 2 + last » + `, server.URL, server.URL, server.URL, server.URL, server.URL) + pages["/apps/utilities/antivirus?page=2"] = fmt.Sprintf(` + +

SecureInit

+ `, server.URL) + + c := NewClient() + c.httpClient = server.Client() + c.rateLimiter = readyRateLimiter() + host := strings.TrimPrefix(server.URL, "https://") + c.allowedHost = map[string]struct{}{host: struct{}{}} + + info, err := c.GetCategoryPageInfo(server.URL + "/apps/utilities/antivirus") + if err != nil { + t.Fatalf("GetCategoryPageInfo: %v", err) + } + if info.TotalCount != 5 { + t.Fatalf("TotalCount = %d, want 5", info.TotalCount) + } + if info.FirstPageCount != 2 { + t.Fatalf("FirstPageCount = %d, want 2", info.FirstPageCount) + } + if info.LastPageNumber != 2 { + t.Fatalf("LastPageNumber = %d, want 2", info.LastPageNumber) + } + if len(info.LastPage) != 1 || info.LastPage[0].Name != "SecureInit" { + t.Fatalf("LastPage = %+v, want SecureInit only", info.LastPage) + } + if info.PageSize != 2 { + t.Fatalf("PageSize = %d, want 2", info.PageSize) + } +} + +func TestGetCategoryPageInfo_PageOneMeansSecondPage(t *testing.T) { + pages := map[string]string{} + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.URL.Path + if r.URL.RawQuery != "" { + key += "?" + r.URL.RawQuery + } + body, ok := pages[key] + if !ok { + http.NotFound(w, r) + return + } + _, _ = fmt.Fprint(w, body) + })) + defer server.Close() + pages["/apps/utilities/antivirus"] = fmt.Sprintf(` + +

Anti-Virus Boot Disk

+

ClamAV upgrade for Leopard Server

+ 2 + last » + `, server.URL, server.URL, server.URL, server.URL) + pages["/apps/utilities/antivirus?page=1"] = fmt.Sprintf(` + +

SecureInit

+ `, server.URL) + + c := NewClient() + c.httpClient = server.Client() + c.rateLimiter = readyRateLimiter() + host := strings.TrimPrefix(server.URL, "https://") + c.allowedHost = map[string]struct{}{host: {}} + + info, err := c.GetCategoryPageInfo(server.URL + "/apps/utilities/antivirus") + if err != nil { + t.Fatalf("GetCategoryPageInfo: %v", err) + } + if info.LastPageNumber != 1 { + t.Fatalf("LastPageNumber = %d, want 1", info.LastPageNumber) + } + if info.TotalCount != 3 { + t.Fatalf("TotalCount = %d, want 3", info.TotalCount) + } +} + +func TestGetCategoryPage_ReturnsSpecificPage(t *testing.T) { + pages := map[string]string{} + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + key := r.URL.Path + if r.URL.RawQuery != "" { + key += "?" + r.URL.RawQuery + } + body, ok := pages[key] + if !ok { + http.NotFound(w, r) + return + } + _, _ = fmt.Fprint(w, body) + })) + defer server.Close() + pages["/apps/utilities/antivirus?page=1"] = fmt.Sprintf(` + +

ClamAV upgrade for Leopard Server

+

Disinfectant

+ `, server.URL, server.URL) + + c := NewClient() + c.httpClient = server.Client() + c.rateLimiter = readyRateLimiter() + host := strings.TrimPrefix(server.URL, "https://") + c.allowedHost = map[string]struct{}{host: struct{}{}} + + results, err := c.GetCategoryPage(server.URL+"/apps/utilities/antivirus", 1) + if err != nil { + t.Fatalf("GetCategoryPage: %v", err) + } + if len(results) != 2 { + t.Fatalf("len(results) = %d, want 2", len(results)) + } + if results[0].Name != "ClamAV upgrade for Leopard Server" { + t.Fatalf("first result = %q", results[0].Name) + } + if results[1].URL != server.URL+"/apps/disinfectant" { + t.Fatalf("second result URL = %q", results[1].URL) + } +} + +func TestFetchDocument_UsesDiskCacheAcrossClients(t *testing.T) { + var mu sync.Mutex + hitCount := 0 + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/apps/utilities/antivirus" { + http.NotFound(w, r) + return + } + mu.Lock() + hitCount++ + mu.Unlock() + _, _ = fmt.Fprint(w, `

Anti-Virus Boot Disk

`) + })) + defer server.Close() + + host := strings.TrimPrefix(server.URL, "https://") + cacheDir := filepath.Join(t.TempDir(), "._htmlcache") + url := server.URL + "/apps/utilities/antivirus" + + c1 := NewClient() + c1.httpClient = server.Client() + c1.rateLimiter = readyRateLimiter() + c1.allowedHost = map[string]struct{}{host: {}} + c1.cacheDir = cacheDir + + if _, err := c1.fetchDocument(url); err != nil { + t.Fatalf("first fetchDocument: %v", err) + } + + c2 := NewClient() + c2.httpClient = server.Client() + c2.rateLimiter = readyRateLimiter() + c2.allowedHost = map[string]struct{}{host: {}} + c2.cacheDir = cacheDir + + if _, err := c2.fetchDocument(url); err != nil { + t.Fatalf("second fetchDocument: %v", err) + } + + mu.Lock() + gotHits := hitCount + mu.Unlock() + if gotHits != 1 { + t.Fatalf("network hit count = %d, want 1", gotHits) + } +} + +func TestHeadContentLength_FailureIsCached_NoRetry(t *testing.T) { + requireLiveTests(t) + rt := &headErrorRoundTripper{} + c := NewClient() + c.httpClient = &http.Client{Transport: rt} + c.rateLimiter = readyRateLimiter() + c.allowedHost = map[string]struct{}{"macintoshgarden.org": {}} + + _, err1 := c.HeadContentLength("https://macintoshgarden.org/files/fail.sit") + if err1 == nil { + t.Fatal("first HeadContentLength error = nil, want non-nil") + } + _, err2 := c.HeadContentLength("https://macintoshgarden.org/files/fail.sit") + if err2 == nil { + t.Fatal("second HeadContentLength error = nil, want cached non-nil") + } + if rt.hits != 1 { + t.Fatalf("HEAD hits = %d, want 1 (no retry)", rt.hits) + } +} + +func TestHeadContentLength_DownloadHost_UsesRangedProbe(t *testing.T) { + requireLiveTests(t) + rt := &probeRoundTripper{} + c := NewClient() + c.httpClient = &http.Client{Transport: rt} + c.rateLimiter = readyRateLimiter() + c.allowedHost = map[string]struct{}{"download.macintoshgarden.org": {}} + + size, err := c.HeadContentLength("https://download.macintoshgarden.org/files/demo.sit") + if err != nil { + 
t.Fatalf("HeadContentLength error: %v", err) + } + if size != 12345 { + t.Fatalf("size = %d, want 12345", size) + } + if rt.headHits != 0 { + t.Fatalf("HEAD hits = %d, want 0", rt.headHits) + } + if rt.getHits != 1 { + t.Fatalf("GET hits = %d, want 1", rt.getHits) + } +} + +func TestHeadContentLength_FallbackToRangedProbe_WhenHeadHasNoLength(t *testing.T) { + requireLiveTests(t) + rt := &probeRoundTripper{mode: "head-no-length"} + c := NewClient() + c.httpClient = &http.Client{Transport: rt} + c.rateLimiter = readyRateLimiter() + c.allowedHost = map[string]struct{}{"macintoshgarden.org": {}} + + size, err := c.HeadContentLength("https://macintoshgarden.org/files/demo.sit") + if err != nil { + t.Fatalf("HeadContentLength error: %v", err) + } + if size != 12345 { + t.Fatalf("size = %d, want 12345", size) + } + if rt.headHits != 1 { + t.Fatalf("HEAD hits = %d, want 1", rt.headHits) + } + if rt.getHits != 1 { + t.Fatalf("GET hits = %d, want 1", rt.getHits) + } +} diff --git a/service/macgarden/doc.go b/service/macgarden/doc.go new file mode 100644 index 0000000..2f1cf9d --- /dev/null +++ b/service/macgarden/doc.go @@ -0,0 +1,4 @@ +// Package macgarden is an HTTP client for macintoshgarden.org used by +// the optional macgarden AFP filesystem backend to expose archived +// classic Macintosh software as a read-only AFP volume. +package macgarden diff --git a/service/macip/dhcp_client.go b/service/macip/dhcp_client.go index 1b87c78..c38e8c8 100644 --- a/service/macip/dhcp_client.go +++ b/service/macip/dhcp_client.go @@ -1,3 +1,5 @@ +//go:build macip || all + // Package macip implements a minimal DHCP client used by the MacIP // gateway. 
It performs DHCP discover/request sequences on behalf of // AppleTalk clients by fabricating per-node Ethernet addresses and @@ -5,14 +7,16 @@ package macip import ( + "context" "encoding/binary" "math/rand" "net" "sync" "time" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port/nat" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/pkg/hwaddr" + "github.com/pgodw/omnitalk/port/nat" ) const ( @@ -89,6 +93,10 @@ type dhcpClient struct { // link is the IPv4 link used to transmit/receive packets. link *etherIPLink + // stop signals service shutdown; in-flight RequestIP calls abort + // instead of blocking on dhcpTimeout. + stop <-chan struct{} + // mu protects the pending map. mu sync.Mutex // pending maps DHCP transaction ids to active pendingDHCP entries. @@ -96,10 +104,12 @@ type dhcpClient struct { } // newDHCPClient constructs a dhcpClient that will use the provided -// IP link to perform DHCP transactions. -func newDHCPClient(link *etherIPLink) *dhcpClient { +// IP link to perform DHCP transactions. stop is the service's lifecycle +// channel; once closed, in-flight DHCP transactions return early. +func newDHCPClient(link *etherIPLink, stop <-chan struct{}) *dhcpClient { return &dhcpClient{ link: link, + stop: stop, pending: make(map[uint32]*pendingDHCP), } } @@ -120,14 +130,14 @@ func (c *dhcpClient) run(stop <-chan struct{}) { // fabricateMACForAT builds a locally administered Ethernet MAC from an // AppleTalk address, giving each Mac a stable identity for the DHCP server. func fabricateMACForAT(atNet uint16, atNode uint8) net.HardwareAddr { - // 0x02 = locally administered, unicast; last two bytes = "MI" (MacIP). - return net.HardwareAddr{0x02, byte(atNet >> 8), byte(atNet), atNode, 0x4D, 0x49} + e := hwaddr.MacIPEthernetFromAppleTalk(hwaddr.AppleTalk{Network: atNet, Node: atNode}) + return e.HardwareAddr() } // RequestIP performs the full DHCP Discover→Offer→Request→Ack handshake for // the given AppleTalk node. 
If preferredIP is non-nil it is sent as option 50. -// Returns nil if DHCP fails or times out. -func (c *dhcpClient) RequestIP(atNet uint16, atNode uint8, preferredIP net.IP) *dhcpResult { +// Returns nil if DHCP fails, times out, the service stops, or ctx is cancelled. +func (c *dhcpClient) RequestIP(ctx context.Context, atNet uint16, atNode uint8, preferredIP net.IP) *dhcpResult { xid := rand.Uint32() fabMAC := fabricateMACForAT(atNet, atNode) p := &pendingDHCP{ @@ -148,10 +158,18 @@ func (c *dhcpClient) RequestIP(atNet uint16, atNode uint8, preferredIP net.IP) * c.sendDiscover(p, preferredIP) + timer := time.NewTimer(dhcpTimeout) + defer timer.Stop() select { case res := <-p.ch: return res // nil on NAK - case <-time.After(dhcpTimeout): + case <-ctx.Done(): + netlog.Debug("[macip-dhcp] aborting DHCP wait for AT %d.%d xid=0x%08x: %v", atNet, atNode, xid, ctx.Err()) + return nil + case <-c.stop: + netlog.Debug("[macip-dhcp] aborting DHCP wait for AT %d.%d xid=0x%08x: service stopping", atNet, atNode, xid) + return nil + case <-timer.C: netlog.Debug("[macip-dhcp] timeout waiting for Ack AT %d.%d xid=0x%08x", atNet, atNode, xid) return nil } diff --git a/service/macip/etherlink.go b/service/macip/etherlink.go index 832f00e..fd2c6c4 100644 --- a/service/macip/etherlink.go +++ b/service/macip/etherlink.go @@ -1,3 +1,5 @@ +//go:build macip || all + package macip import ( @@ -8,8 +10,8 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port/rawlink" + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port/rawlink" ) const ( @@ -69,6 +71,8 @@ type etherIPLink struct { dhcpInbound chan []byte // stop is closed to request goroutine termination. stop chan struct{} + // wg tracks background goroutines so close() can join them deterministically. 
+ wg sync.WaitGroup } // newEtherIPLink wraps the provided RawLink into an etherIPLink ready to @@ -103,8 +107,13 @@ func newEtherIPLink(link rawlink.RawLink, ourMAC net.HardwareAddr, hostIP net.IP // start launches background goroutines for packet capture and optionally // probes the configured default gateway to prime the ARP cache. func (l *etherIPLink) start() { - go l.readLoop() + l.wg.Add(2) + go func() { + defer l.wg.Done() + l.readLoop() + }() go func() { + defer l.wg.Done() gw := l.getDefaultGateway() if _, err := l.resolveMAC(gw); err != nil { netlog.Warn("macip: could not ARP for default gateway %s: %v", gw, err) @@ -137,10 +146,13 @@ func (l *etherIPLink) setDefaultGateway(gw net.IP) { l.gwMu.Unlock() } -// close stops background processing and closes the rawlink. +// close stops background processing and closes the rawlink. Blocks until +// the readLoop and gateway-probe goroutines have exited so callers see a +// fully-quiesced link on return. func (l *etherIPLink) close() { close(l.stop) l.link.Close() + l.wg.Wait() } // sendFrame transmits a raw Ethernet frame via the underlying rawlink. @@ -397,23 +409,35 @@ func (l *etherIPLink) resolveMAC(ip net.IP) (net.HardwareAddr, error) { l.sendARPRequest(ip4) + timer := time.NewTimer(arpLookupTimeout) + defer timer.Stop() select { case mac := <-ch: return mac, nil - case <-time.After(arpLookupTimeout): - l.arpMu.Lock() - waiters := l.arpWait[key] - for i, c := range waiters { - if c == ch { - l.arpWait[key] = append(waiters[:i], waiters[i+1:]...) - break - } - } - l.arpMu.Unlock() + case <-l.stop: + l.dropARPWaiter(key, ch) + return nil, fmt.Errorf("ARP lookup aborted for %s: link closing", ip4) + case <-timer.C: + l.dropARPWaiter(key, ch) return nil, fmt.Errorf("ARP timeout for %s", ip4) } } +// dropARPWaiter removes ch from the waiter list for key. 
Called when an +// ARP request gives up (timeout or shutdown) so the next reply that +// arrives doesn't get delivered to a goroutine that has already moved on. +func (l *etherIPLink) dropARPWaiter(key [4]byte, ch chan net.HardwareAddr) { + l.arpMu.Lock() + waiters := l.arpWait[key] + for i, c := range waiters { + if c == ch { + l.arpWait[key] = append(waiters[:i], waiters[i+1:]...) + break + } + } + l.arpMu.Unlock() +} + // sendIPPacket injects a raw IPv4 packet onto the IP-side Ethernet network. // Used for on-subnet traffic to pool IPs. Off-subnet traffic goes via OSNAT. func (l *etherIPLink) sendIPPacket(pkt []byte) error { diff --git a/service/macip/macip.go b/service/macip/macip.go index d6632da..fac9b1d 100644 --- a/service/macip/macip.go +++ b/service/macip/macip.go @@ -1,3 +1,5 @@ +//go:build macip || all + // Package macip implements a MacIP gateway service (equivalent of macipgw). // It bridges IP traffic between an Ethernet rawlink and AppleTalk nodes using // the MacIP protocol: @@ -10,17 +12,20 @@ package macip import ( + "context" "encoding/binary" "net" + "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/port/nat" - "github.com/pgodw/omnitalk/go/port/rawlink" - "github.com/pgodw/omnitalk/go/service" - "github.com/pgodw/omnitalk/go/service/zip" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/port/nat" + "github.com/pgodw/omnitalk/port/rawlink" + "github.com/pgodw/omnitalk/service" + "github.com/pgodw/omnitalk/service/zip" ) const ( @@ -77,14 +82,20 @@ type Service struct { osnat *nat.OSNAT dhcp *dhcpClient link *etherIPLink - router service.Router // set in Start(), read-only afterwards + router service.DatagramRouter // set in Start(), read-only afterwards ch chan inboundPkt stop chan struct{} + + // ctx is cancelled when Stop() is called 
and is the parent of any + // per-request contexts handed to background work (DHCP, etc.). + ctx context.Context + ctxCancel context.CancelFunc + wg sync.WaitGroup } type inboundPkt struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } @@ -106,7 +117,7 @@ func New(gwIP, network net.IP, mask net.IPMask, nameserver, broadcast net.IP, zone []byte, nbp *zip.NameInformationService, ipLink rawlink.RawLink, ipOurMAC net.HardwareAddr, ipHostIP, ipDefaultGW net.IP, natEnabled bool, dhcpMode bool, stateFile string) *Service { - return &Service{ + s := &Service{ gwIP: gwIP.To4(), subnetMask: mask, nameserverIP: nameserver.To4(), @@ -124,14 +135,16 @@ func New(gwIP, network net.IP, mask net.IPMask, nameserver, broadcast net.IP, ch: make(chan inboundPkt, 256), stop: make(chan struct{}), } + return s } // Socket returns the AppleTalk socket number for this service. func (s *Service) Socket() uint8 { return Socket } // Start opens the pcap IP link, registers the NBP name and starts goroutines. -func (s *Service) Start(r service.Router) error { +func (s *Service) Start(ctx context.Context, r service.Router) error { s.router = r + s.ctx, s.ctxCancel = context.WithCancel(ctx) // Resolve zone name if not supplied. if len(s.zoneName) == 0 { @@ -159,7 +172,7 @@ func (s *Service) Start(r service.Router) error { s.link.start() if s.dhcpMode { - s.dhcp = newDHCPClient(s.link) + s.dhcp = newDHCPClient(s.link, s.stop) go s.dhcp.run(s.stop) netlog.Info("macip: DHCP relay enabled — relaying DHCP and converting responses to MacIP configuration for clients") } @@ -169,9 +182,10 @@ func (s *Service) Start(r service.Router) error { // Register as ":IPGATEWAY@" so Macs can find us via NBP. 
s.nbp.RegisterName([]byte(s.gwIP.String()), []byte("IPGATEWAY"), s.zoneName, Socket) - go s.inboundLoop() - go s.ipInboundLoop() - go s.expiryLoop() + s.wg.Add(3) + go func() { defer s.wg.Done(); s.inboundLoop() }() + go func() { defer s.wg.Done(); s.ipInboundLoop() }() + go func() { defer s.wg.Done(); s.expiryLoop() }() netlog.Info("macip: gateway started gw=%s host-ip=%s zone=%q", s.gwIP, s.ipHostIP, s.zoneName) if !s.natEnabled && !s.dhcpMode { @@ -188,6 +202,7 @@ func (s *Service) Start(r service.Router) error { // Stop unregisters NBP, closes the IP link and shuts down all goroutines. func (s *Service) Stop() error { s.nbp.UnregisterName([]byte(s.gwIP.String()), []byte("IPGATEWAY"), s.zoneName) + s.ctxCancel() close(s.stop) if s.osnat != nil { s.osnat.Close() @@ -195,6 +210,7 @@ func (s *Service) Stop() error { if s.link != nil { s.link.close() } + s.wg.Wait() s.pool.saveToFile(s.stateFile) return nil } @@ -217,7 +233,7 @@ func (s *Service) MarkSessionActivity(sessionID uint8) { } // Inbound is called by the router for every DDP datagram addressed to socket 72. -func (s *Service) Inbound(d appletalk.Datagram, p port.Port) { +func (s *Service) Inbound(d ddp.Datagram, p port.Port) { select { case s.ch <- inboundPkt{d: d, p: p}: default: @@ -242,7 +258,7 @@ func (s *Service) inboundLoop() { } // handleATPConfig processes an ATP TReq on socket 72: an IP address request. 
-func (s *Service) handleATPConfig(d appletalk.Datagram, rx port.Port) { +func (s *Service) handleATPConfig(d ddp.Datagram, rx port.Port) { atNet, atNode := normalizeATSource(d, rx) if !validATEndpoint(atNet, atNode) { netlog.Warn("macip: dropping ATP config request with invalid source AT %d.%d", d.SourceNetwork, d.SourceNode) @@ -293,7 +309,7 @@ func (s *Service) handleATPConfig(d appletalk.Datagram, rx port.Port) { return } } - go s.handleATPConfigDHCP(d, rx, tid, requestedIP, atNet, atNode) + go s.handleATPConfigDHCP(s.ctx, d, rx, tid, requestedIP, atNet, atNode) return } @@ -311,7 +327,7 @@ func (s *Service) handleATPConfig(d appletalk.Datagram, rx port.Port) { } // sendATPConfigResp builds and sends an ATP TResp with the given IP configuration. -func (s *Service) sendATPConfigResp(d appletalk.Datagram, rx port.Port, tid uint16, assignedIP, nameserver, broadcast net.IP, mask net.IPMask) { +func (s *Service) sendATPConfigResp(d ddp.Datagram, rx port.Port, tid uint16, assignedIP, nameserver, broadcast net.IP, mask net.IPMask) { resp := make([]byte, 4+configDataLen) resp[0] = atpFuncTResp | atpEOM resp[1] = 0 // seq 0 @@ -335,8 +351,8 @@ func (s *Service) sendATPConfigResp(d appletalk.Datagram, rx port.Port, tid uint // handleATPConfigDHCP runs in its own goroutine: performs a full DHCP exchange // and sends the ATP TResp once an address is assigned. 
-func (s *Service) handleATPConfigDHCP(d appletalk.Datagram, rx port.Port, tid uint16, requestedIP net.IP, atNet uint16, atNode uint8) { - res := s.dhcp.RequestIP(atNet, atNode, requestedIP) +func (s *Service) handleATPConfigDHCP(ctx context.Context, d ddp.Datagram, rx port.Port, tid uint16, requestedIP net.IP, atNet uint16, atNode uint8) { + res := s.dhcp.RequestIP(ctx, atNet, atNode, requestedIP) if res == nil { netlog.Warn("macip-dhcp: no DHCP response for AT %d.%d — not replying to ATP", atNet, atNode) return @@ -367,7 +383,7 @@ func (s *Service) handleATPConfigDHCP(d appletalk.Datagram, rx port.Port, tid ui } // handleMacIPData processes a DDP type 22 packet: a raw IP packet from a Mac. -func (s *Service) handleMacIPData(d appletalk.Datagram) { +func (s *Service) handleMacIPData(d ddp.Datagram) { if len(d.Data) < 20 { netlog.Debug("macip: dropping short MacIP data from AT %d.%d (len=%d)", d.SourceNetwork, d.SourceNode, len(d.Data)) @@ -416,7 +432,7 @@ func (s *Service) routeIPToMac(atNet uint16, atNode uint8, pkt []byte) { return } for _, frag := range frags { - if err := s.router.Route(appletalk.Datagram{ + if err := s.router.Route(ddp.Datagram{ DestinationNetwork: atNet, DestinationNode: atNode, DestinationSocket: Socket, @@ -429,7 +445,7 @@ func (s *Service) routeIPToMac(atNet uint16, atNode uint8, pkt []byte) { } } -func normalizeATSource(d appletalk.Datagram, rx port.Port) (uint16, uint8) { +func normalizeATSource(d ddp.Datagram, rx port.Port) (uint16, uint8) { atNet := d.SourceNetwork if atNet == 0 && rx != nil && rx.Network() != 0 { atNet = rx.Network() @@ -495,7 +511,7 @@ func (s *Service) handleGatewayICMP(srcNet uint16, srcNode uint8, pkt []byte) { binary.BigEndian.PutUint16(reply[ihl+2:ihl+4], nat.RawChecksum(reply[ihl:])) netlog.Debug("macip: ICMP echo reply %s→%s via AT %d.%d", s.gwIP, clientIP, atNet, atNode) - _ = s.router.Route(appletalk.Datagram{ + _ = s.router.Route(ddp.Datagram{ DestinationNetwork: atNet, DestinationNode: atNode, 
DestinationSocket: Socket, diff --git a/service/macip/pool.go b/service/macip/pool.go index 5710de7..5e4480b 100644 --- a/service/macip/pool.go +++ b/service/macip/pool.go @@ -1,3 +1,5 @@ +//go:build macip || all + package macip import ( @@ -7,7 +9,7 @@ import ( "sync" "time" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" ) const leaseDuration = 5 * time.Minute diff --git a/service/macip/pool_test.go b/service/macip/pool_test.go index f55cbcb..ebd9eb2 100644 --- a/service/macip/pool_test.go +++ b/service/macip/pool_test.go @@ -1,3 +1,5 @@ +//go:build macip || all + package macip import ( diff --git a/service/macip/state.go b/service/macip/state.go index 054e72e..7313835 100644 --- a/service/macip/state.go +++ b/service/macip/state.go @@ -1,3 +1,5 @@ +//go:build macip || all + package macip import ( @@ -6,7 +8,7 @@ import ( "os" "time" - "github.com/pgodw/omnitalk/go/netlog" + "github.com/pgodw/omnitalk/netlog" ) type savedLease struct { diff --git a/service/mock_router_port_test.go b/service/mock_router_port_test.go deleted file mode 100644 index 2cb4092..0000000 --- a/service/mock_router_port_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package service - -import ( - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" -) - -type mockPort struct { - shortStringFunc func() string - startFunc func(router port.RouterHooks) error - stopFunc func() error - unicastFunc func(network uint16, node uint8, datagram appletalk.Datagram) - broadcastFunc func(datagram appletalk.Datagram) - multicastFunc func(zoneName []byte, datagram appletalk.Datagram) - setNetworkRangeFunc func(networkMin, networkMax uint16) error - networkFunc func() uint16 - nodeFunc func() uint8 - networkMinFunc func() uint16 - networkMaxFunc func() uint16 - extendedNetworkFunc func() bool -} - -func (m *mockPort) ShortString() string { return m.shortStringFunc() } -func (m *mockPort) Start(router port.RouterHooks) error { return m.startFunc(router) } -func 
(m *mockPort) Stop() error { return m.stopFunc() } -func (m *mockPort) Unicast(network uint16, node uint8, datagram appletalk.Datagram) { - m.unicastFunc(network, node, datagram) -} -func (m *mockPort) Broadcast(datagram appletalk.Datagram) { m.broadcastFunc(datagram) } -func (m *mockPort) Multicast(zoneName []byte, datagram appletalk.Datagram) { - m.multicastFunc(zoneName, datagram) -} -func (m *mockPort) SetNetworkRange(networkMin, networkMax uint16) error { - return m.setNetworkRangeFunc(networkMin, networkMax) -} -func (m *mockPort) Network() uint16 { return m.networkFunc() } -func (m *mockPort) Node() uint8 { return m.nodeFunc() } -func (m *mockPort) NetworkMin() uint16 { return m.networkMinFunc() } -func (m *mockPort) NetworkMax() uint16 { return m.networkMaxFunc() } -func (m *mockPort) ExtendedNetwork() bool { return m.extendedNetworkFunc() } - -type mockRouter struct { - routeFunc func(datagram appletalk.Datagram, originating bool) error - replyFunc func(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) - portsListFunc func() []port.Port - routingGetByNetworkFunc func(network uint16) (*RouteEntry, *bool) - routingEntriesFunc func() []struct { - Entry *RouteEntry - Bad bool - } - routingConsiderFunc func(entry *RouteEntry) bool - routingMarkBadFunc func(networkMin, networkMax uint16) bool - zonesInNetworkRangeFunc func(networkMin uint16, networkMax *uint16) ([][]byte, error) - networksInZoneFunc func(zoneName []byte) []uint16 - zonesFunc func() [][]byte - addNetworksToZoneFunc func(zoneName []byte, networkMin uint16, networkMax *uint16) error - routingTableAgeFunc func() -} - -func (m *mockRouter) Route(datagram appletalk.Datagram, originating bool) error { - return m.routeFunc(datagram, originating) -} -func (m *mockRouter) Reply(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) { - m.replyFunc(datagram, rxPort, ddpType, data) -} -func (m *mockRouter) PortsList() []port.Port { return m.portsListFunc() } 
-func (m *mockRouter) RoutingGetByNetwork(network uint16) (*RouteEntry, *bool) { - return m.routingGetByNetworkFunc(network) -} -func (m *mockRouter) RoutingEntries() []struct { - Entry *RouteEntry - Bad bool -} { - return m.routingEntriesFunc() -} -func (m *mockRouter) RoutingConsider(entry *RouteEntry) bool { return m.routingConsiderFunc(entry) } -func (m *mockRouter) RoutingMarkBad(networkMin, networkMax uint16) bool { - return m.routingMarkBadFunc(networkMin, networkMax) -} -func (m *mockRouter) ZonesInNetworkRange(networkMin uint16, networkMax *uint16) ([][]byte, error) { - return m.zonesInNetworkRangeFunc(networkMin, networkMax) -} -func (m *mockRouter) NetworksInZone(zoneName []byte) []uint16 { return m.networksInZoneFunc(zoneName) } -func (m *mockRouter) Zones() [][]byte { return m.zonesFunc() } -func (m *mockRouter) AddNetworksToZone(zoneName []byte, networkMin uint16, networkMax *uint16) error { - return m.addNetworksToZoneFunc(zoneName, networkMin, networkMax) -} -func (m *mockRouter) RoutingTableAge() { m.routingTableAgeFunc() } diff --git a/service/rtmp/doc.go b/service/rtmp/doc.go new file mode 100644 index 0000000..18d5ec5 --- /dev/null +++ b/service/rtmp/doc.go @@ -0,0 +1,8 @@ +// Package rtmp implements the Routing Table Maintenance Protocol. +// +// It provides a RespondingService (replies to Route Data Requests on +// socket 1) and a SendingService (periodically broadcasts the local +// routing table to neighbouring routers). +// +// See spec/05-rtmp.md and Inside AppleTalk 2/e §5. 
+package rtmp diff --git a/service/rtmp/responding.go b/service/rtmp/responding.go index e8fae42..ed21c44 100644 --- a/service/rtmp/responding.go +++ b/service/rtmp/responding.go @@ -1,35 +1,43 @@ package rtmp import ( + "context" "encoding/binary" + "sync" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) type RespondingService struct { ch chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } stop chan struct{} + wg sync.WaitGroup } func NewRespondingService() *RespondingService { return &RespondingService{ ch: make(chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }, 256), stop: make(chan struct{}), } } -func (s *RespondingService) Start(r service.Router) error { +func (s *RespondingService) Start(ctx context.Context, r service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() for { select { + case <-ctx.Done(): + return case <-s.stop: return case item := <-s.ch: @@ -128,11 +136,15 @@ func (s *RespondingService) Start(r service.Router) error { return nil } -func (s *RespondingService) Stop() error { close(s.stop); return nil } -func (s *RespondingService) Inbound(d appletalk.Datagram, p port.Port) { +func (s *RespondingService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} +func (s *RespondingService) Inbound(d ddp.Datagram, p port.Port) { select { case s.ch <- struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }{d: d, p: p}: default: diff --git a/service/rtmp/routing_table_aging.go b/service/rtmp/routing_table_aging.go index 85d5295..271e401 100644 --- a/service/rtmp/routing_table_aging.go +++ b/service/rtmp/routing_table_aging.go @@ -1,37 +1,52 @@ package rtmp import ( + "context" + "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - 
"github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) type RoutingTableAgingService struct { timeout time.Duration stop chan struct{} + wg sync.WaitGroup } func NewRoutingTableAgingService() *RoutingTableAgingService { return &RoutingTableAgingService{timeout: 20 * time.Second, stop: make(chan struct{})} } -func (s *RoutingTableAgingService) Start(router service.Router) error { +func (s *RoutingTableAgingService) Start(ctx context.Context, router service.Router) error { + // Narrow to RouteIndex inside the goroutine so the type signature + // documents the only capability this loop touches. + idx := service.RouteIndex(router) + s.wg.Add(1) go func() { + defer s.wg.Done() t := time.NewTicker(s.timeout) defer t.Stop() for { select { + case <-ctx.Done(): + return case <-s.stop: return case <-t.C: - router.RoutingTableAge() + idx.RoutingTableAge() } } }() return nil } -func (s *RoutingTableAgingService) Stop() error { close(s.stop); return nil } -func (s *RoutingTableAgingService) Inbound(_ appletalk.Datagram, _ port.Port) {} +func (s *RoutingTableAgingService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} +func (s *RoutingTableAgingService) Inbound(_ ddp.Datagram, _ port.Port) {} diff --git a/service/rtmp/rtmp.go b/service/rtmp/rtmp.go index 1916dc3..efcf724 100644 --- a/service/rtmp/rtmp.go +++ b/service/rtmp/rtmp.go @@ -3,22 +3,25 @@ package rtmp import ( "encoding/binary" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + prtmp "github.com/pgodw/omnitalk/protocol/rtmp" + + "github.com/pgodw/omnitalk/service" ) +// Wire constants re-exported from protocol/rtmp. 
const ( - SAS = 1 - DDPTypeData = 1 - DDPTypeRequest = 5 - Version = 0x82 - FuncRequest = 1 - FuncRDRSplitHorizon = 2 - FuncRDRNoSplitHorizon = 3 - NotifyNeighborDistance = 31 + SAS = prtmp.SAS + DDPTypeData = prtmp.DDPTypeData + DDPTypeRequest = prtmp.DDPTypeRequest + Version = prtmp.Version + FuncRequest = prtmp.FuncRequest + FuncRDRSplitHorizon = prtmp.FuncRDRSplitHorizon + FuncRDRNoSplitHorizon = prtmp.FuncRDRNoSplitHorizon + NotifyNeighborDistance = prtmp.NotifyNeighborDistance ) -func makeRoutingTableDatagramData(r service.Router, p interface { +func makeRoutingTableDatagramData(r service.RouteIndex, p interface { NetworkMin() uint16 NetworkMax() uint16 Network() uint16 @@ -62,7 +65,7 @@ func makeRoutingTableDatagramData(r service.Router, p interface { var out [][]byte curr := append([]byte(nil), header...) for _, t := range tuples { - if len(curr)+len(t) > appletalk.MaxDataLength { + if len(curr)+len(t) > ddp.MaxDataLength { out = append(out, curr) curr = append(append([]byte(nil), header...), t...) 
} else { diff --git a/service/rtmp/sending.go b/service/rtmp/sending.go index f19478c..87f1628 100644 --- a/service/rtmp/sending.go +++ b/service/rtmp/sending.go @@ -1,28 +1,36 @@ package rtmp import ( + "context" + "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) type SendingService struct { timeout time.Duration stop chan struct{} + wg sync.WaitGroup } func NewSendingService() *SendingService { return &SendingService{timeout: 10 * time.Second, stop: make(chan struct{})} } -func (s *SendingService) Start(r service.Router) error { +func (s *SendingService) Start(ctx context.Context, r service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() t := time.NewTicker(s.timeout) defer t.Stop() for { select { + case <-ctx.Done(): + return case <-s.stop: return case <-t.C: @@ -31,7 +39,7 @@ func (s *SendingService) Start(r service.Router) error { continue } for _, data := range makeRoutingTableDatagramData(r, p, true) { - p.Broadcast(appletalk.Datagram{ + p.Broadcast(ddp.Datagram{ DestinationNetwork: 0, SourceNetwork: p.Network(), DestinationNode: 0xFF, SourceNode: p.Node(), DestinationSocket: SAS, SourceSocket: SAS, DDPType: DDPTypeData, Data: data, }) @@ -43,5 +51,10 @@ func (s *SendingService) Start(r service.Router) error { return nil } -func (s *SendingService) Stop() error { close(s.stop); return nil } -func (s *SendingService) Inbound(_ appletalk.Datagram, _ port.Port) {} +func (s *SendingService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} + +func (s *SendingService) Inbound(_ ddp.Datagram, _ port.Port) {} diff --git a/service/service.go b/service/service.go index ea6bf47..b0306aa 100644 --- a/service/service.go +++ b/service/service.go @@ -1,14 +1,24 @@ package service import ( - "github.com/pgodw/omnitalk/go/appletalk" - 
"github.com/pgodw/omnitalk/go/port" + "context" + + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" ) +// Service is the contract every service registered with the router +// satisfies. Start receives a parent context that is cancelled when the +// router shuts down; implementations should derive their own per-goroutine +// contexts from it so background work can be aborted without waiting for +// hardcoded timeouts. Stop is still required for synchronous teardown of +// resources that the context cannot itself release (open files, OS NAT, +// pcap handles). type Service interface { - Start(router Router) error + Start(ctx context.Context, router Router) error Stop() error - Inbound(datagram appletalk.Datagram, rxPort port.Port) + Inbound(datagram ddp.Datagram, rxPort port.Port) } // PacketDumper is a sink for service-level packet logging. @@ -21,10 +31,21 @@ type PacketDumpAware interface { SetPacketDumper(dumper PacketDumper) } -type Router interface { - Route(datagram appletalk.Datagram, originating bool) error - Reply(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) +// DatagramRouter is what every service can assume of the router: send a +// datagram and reply to one. The router-shaped capabilities below +// (RouteIndex, ZoneIndex) are layered on for the small number of +// services that maintain those tables. +type DatagramRouter interface { + Route(datagram ddp.Datagram, originating bool) error + Reply(datagram ddp.Datagram, rxPort port.Port, ddpType uint8, data []byte) PortsList() []port.Port + Zones() [][]byte +} + +// RouteIndex exposes the routing table to RTMP (which owns it) and to +// ZIP's sending path (which iterates known networks). Services that do +// not maintain or scan the routing table must not depend on this. 
+type RouteIndex interface { RoutingGetByNetwork(network uint16) (*RouteEntry, *bool) RoutingEntries() []struct { Entry *RouteEntry @@ -32,11 +53,27 @@ type Router interface { } RoutingConsider(entry *RouteEntry) bool RoutingMarkBad(networkMin, networkMax uint16) bool + RoutingTableAge() +} + +// ZoneIndex exposes the zone-information table to ZIP and to seed-zone +// registration during port startup. AddNetworksToZone is called by +// ports via anonymous-interface assertion at port-Start time, not +// through the service.Router contract. +type ZoneIndex interface { ZonesInNetworkRange(networkMin uint16, networkMax *uint16) ([][]byte, error) NetworksInZone(zoneName []byte) []uint16 - Zones() [][]byte AddNetworksToZone(zoneName []byte, networkMin uint16, networkMax *uint16) error - RoutingTableAge() +} + +// Router is the union every concrete router (router.Router) satisfies and +// that Service.Start receives. Services should narrow this to the +// capability subset they actually use as soon as it crosses into their +// own code — see zip and rtmp for the pattern. +type Router interface { + DatagramRouter + RouteIndex + ZoneIndex } type RouteEntry struct { diff --git a/service/zip/doc.go b/service/zip/doc.go new file mode 100644 index 0000000..4dd4684 --- /dev/null +++ b/service/zip/doc.go @@ -0,0 +1,8 @@ +// Package zip implements the AppleTalk Zone Information Protocol. +// +// It provides a RespondingService (answers ZIP queries on socket 6) +// and a SendingService (issues ZIP queries to discover zones for +// networks added by RTMP). +// +// See spec/06-zip.md and Inside AppleTalk 2/e §8. 
+package zip diff --git a/service/zip/mock_test.go b/service/zip/mock_test.go index 7e54be2..cc19368 100644 --- a/service/zip/mock_test.go +++ b/service/zip/mock_test.go @@ -1,91 +1,11 @@ package zip -import ( - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" +import "github.com/pgodw/omnitalk/internal/testutil" + +// Package-local aliases that let existing tests keep using the lowercase +// names. The real mocks live in internal/testutil so any future package +// with testing needs can share them. +type ( + mockPort = testutil.MockPort + mockRouter = testutil.MockRouter ) - -type mockPort struct { - shortStringFunc func() string - startFunc func(router port.RouterHooks) error - stopFunc func() error - unicastFunc func(network uint16, node uint8, datagram appletalk.Datagram) - broadcastFunc func(datagram appletalk.Datagram) - multicastFunc func(zoneName []byte, datagram appletalk.Datagram) - setNetworkRangeFunc func(networkMin, networkMax uint16) error - networkFunc func() uint16 - nodeFunc func() uint8 - networkMinFunc func() uint16 - networkMaxFunc func() uint16 - extendedNetworkFunc func() bool -} - -func (m *mockPort) ShortString() string { return m.shortStringFunc() } -func (m *mockPort) Start(router port.RouterHooks) error { return m.startFunc(router) } -func (m *mockPort) Stop() error { return m.stopFunc() } -func (m *mockPort) Unicast(network uint16, node uint8, datagram appletalk.Datagram) { - m.unicastFunc(network, node, datagram) -} -func (m *mockPort) Broadcast(datagram appletalk.Datagram) { m.broadcastFunc(datagram) } -func (m *mockPort) Multicast(zoneName []byte, datagram appletalk.Datagram) { - m.multicastFunc(zoneName, datagram) -} -func (m *mockPort) SetNetworkRange(networkMin, networkMax uint16) error { - return m.setNetworkRangeFunc(networkMin, networkMax) -} -func (m *mockPort) Network() uint16 { return m.networkFunc() } -func (m *mockPort) Node() uint8 { return m.nodeFunc() 
} -func (m *mockPort) NetworkMin() uint16 { return m.networkMinFunc() } -func (m *mockPort) NetworkMax() uint16 { return m.networkMaxFunc() } -func (m *mockPort) ExtendedNetwork() bool { return m.extendedNetworkFunc() } - -type mockRouter struct { - routeFunc func(datagram appletalk.Datagram, originating bool) error - replyFunc func(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) - portsListFunc func() []port.Port - routingGetByNetworkFunc func(network uint16) (*service.RouteEntry, *bool) - routingEntriesFunc func() []struct { - Entry *service.RouteEntry - Bad bool - } - routingConsiderFunc func(entry *service.RouteEntry) bool - routingMarkBadFunc func(networkMin, networkMax uint16) bool - zonesInNetworkRangeFunc func(networkMin uint16, networkMax *uint16) ([][]byte, error) - networksInZoneFunc func(zoneName []byte) []uint16 - zonesFunc func() [][]byte - addNetworksToZoneFunc func(zoneName []byte, networkMin uint16, networkMax *uint16) error - routingTableAgeFunc func() -} - -func (m *mockRouter) Route(datagram appletalk.Datagram, originating bool) error { - return m.routeFunc(datagram, originating) -} -func (m *mockRouter) Reply(datagram appletalk.Datagram, rxPort port.Port, ddpType uint8, data []byte) { - m.replyFunc(datagram, rxPort, ddpType, data) -} -func (m *mockRouter) PortsList() []port.Port { return m.portsListFunc() } -func (m *mockRouter) RoutingGetByNetwork(network uint16) (*service.RouteEntry, *bool) { - return m.routingGetByNetworkFunc(network) -} -func (m *mockRouter) RoutingEntries() []struct { - Entry *service.RouteEntry - Bad bool -} { - return m.routingEntriesFunc() -} -func (m *mockRouter) RoutingConsider(entry *service.RouteEntry) bool { - return m.routingConsiderFunc(entry) -} -func (m *mockRouter) RoutingMarkBad(networkMin, networkMax uint16) bool { - return m.routingMarkBadFunc(networkMin, networkMax) -} -func (m *mockRouter) ZonesInNetworkRange(networkMin uint16, networkMax *uint16) ([][]byte, error) { - return 
m.zonesInNetworkRangeFunc(networkMin, networkMax) -} -func (m *mockRouter) NetworksInZone(zoneName []byte) []uint16 { return m.networksInZoneFunc(zoneName) } -func (m *mockRouter) Zones() [][]byte { return m.zonesFunc() } -func (m *mockRouter) AddNetworksToZone(zoneName []byte, networkMin uint16, networkMax *uint16) error { - return m.addNetworksToZoneFunc(zoneName, networkMin, networkMax) -} -func (m *mockRouter) RoutingTableAge() { m.routingTableAgeFunc() } diff --git a/service/zip/name_information.go b/service/zip/name_information.go index 4e7a2ab..51b85b2 100644 --- a/service/zip/name_information.go +++ b/service/zip/name_information.go @@ -2,21 +2,27 @@ package zip import ( "bytes" + "context" "sync" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + "github.com/pgodw/omnitalk/protocol/nbp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) +// NBP wire-format constants are re-exported from protocol/nbp so the +// existing zip.NBPSASSocket / zip.NBPDDPType call sites stay valid. const ( - NBPSASSocket = 2 - NBPDDPType = 2 - nbpCtrlBrRq = 1 - nbpCtrlLkUp = 2 - nbpCtrlLkUpRply = 3 - nbpCtrlFwd = 4 + NBPSASSocket = nbp.SASSocket + NBPDDPType = nbp.DDPType + + nbpCtrlBrRq = nbp.CtrlBrRq + nbpCtrlLkUp = nbp.CtrlLkUp + nbpCtrlLkUpRply = nbp.CtrlLkUpRply + nbpCtrlFwd = nbp.CtrlFwd ) type NBPRegisteredName struct { @@ -28,10 +34,11 @@ type NBPRegisteredName struct { type NameInformationService struct { ch chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } stop chan struct{} + wg sync.WaitGroup nameMu sync.RWMutex names []NBPRegisteredName } @@ -67,45 +74,18 @@ func (s *NameInformationService) UnregisterName(obj, typ, zone []byte) { } } -// nbpMatch returns true if pattern matches name: "=" is a wildcard. 
-func nbpMatch(pattern, name []byte) bool { - if len(pattern) == 1 && pattern[0] == '=' { - return true - } - return bytes.EqualFold(pattern, name) -} - -// nbpZoneMatch returns true when a BrRq/LkUp zone selector matches a -// registered zone. NBP uses "*" as the zone wildcard. -func nbpZoneMatch(pattern, zone []byte) bool { - if len(pattern) == 1 && pattern[0] == '*' { - return true - } - return bytes.EqualFold(pattern, zone) -} - -// buildLkUpRply constructs an NBP LkUp-Rply payload for a single matching name. +// nbpMatch / nbpZoneMatch / buildLkUpRply now live in protocol/nbp. +// We keep tiny shims so the rest of this file reads naturally. +func nbpMatch(pattern, name []byte) bool { return nbp.NameMatch(pattern, name) } +func nbpZoneMatch(pattern, zone []byte) bool { return nbp.ZoneMatch(pattern, zone) } func buildLkUpRply(nbpID byte, network uint16, node, socket uint8, obj, typ, zone []byte) []byte { - buf := make([]byte, 0, 12+len(obj)+len(typ)+len(zone)) - buf = append(buf, (nbpCtrlLkUpRply<<4)|1) - buf = append(buf, nbpID) - buf = append(buf, byte(network>>8), byte(network)) - buf = append(buf, node) - buf = append(buf, socket) - buf = append(buf, 0) // enum - buf = append(buf, byte(len(obj))) - buf = append(buf, obj...) - buf = append(buf, byte(len(typ))) - buf = append(buf, typ...) - buf = append(buf, byte(len(zone))) - buf = append(buf, zone...) 
- return buf + return nbp.BuildLkUpRply(nbpID, network, node, socket, obj, typ, zone) } func NewNameInformationService() *NameInformationService { return &NameInformationService{ ch: make(chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }, 256), stop: make(chan struct{}), @@ -113,21 +93,29 @@ func NewNameInformationService() *NameInformationService { } func (s *NameInformationService) Socket() uint8 { return NBPSASSocket } -func (s *NameInformationService) Stop() error { close(s.stop); return nil } -func (s *NameInformationService) Inbound(d appletalk.Datagram, p port.Port) { +func (s *NameInformationService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} +func (s *NameInformationService) Inbound(d ddp.Datagram, p port.Port) { select { case s.ch <- struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }{d: d, p: p}: default: } } -func (s *NameInformationService) Start(r service.Router) error { +func (s *NameInformationService) Start(ctx context.Context, r service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() for { select { + case <-ctx.Done(): + return case <-s.stop: return case item := <-s.ch: @@ -138,52 +126,36 @@ func (s *NameInformationService) Start(r service.Router) error { return nil } -func (s *NameInformationService) handlePacket(d appletalk.Datagram, p port.Port, r service.Router) { - if d.DDPType != NBPDDPType || len(d.Data) < 12 { - return - } - funcTupleCount := d.Data[0] - f := funcTupleCount >> 4 - tupleCount := funcTupleCount & 0xF - if tupleCount != 1 || (f != nbpCtrlBrRq && f != nbpCtrlFwd && f != nbpCtrlLkUp) { - return - } - objLen := int(d.Data[7]) - if objLen < 1 || len(d.Data) < 8+objLen+1 { +func (s *NameInformationService) handlePacket(d ddp.Datagram, p port.Port, r service.Router) { + if d.DDPType != NBPDDPType { return } - typLen := int(d.Data[8+objLen]) - if typLen < 1 || len(d.Data) < 9+objLen+typLen+1 { + pkt, err := nbp.ParsePacket(d.Data) + if err != nil || pkt.TupleCount != 1 { 
return } - zoneLen := int(d.Data[9+objLen+typLen]) - if len(d.Data) < 10+objLen+typLen+zoneLen { + switch pkt.Function { + case nbpCtrlBrRq, nbpCtrlFwd, nbpCtrlLkUp: + default: return } - zone := d.Data[10+objLen+typLen : 10+objLen+typLen+zoneLen] - if len(zone) == 0 { - zone = []byte("*") - } - replyNet := uint16(d.Data[2])<<8 | uint16(d.Data[3]) + replyNet := pkt.Tuple.Network if replyNet == 0 { replyNet = p.Network() } - obj := d.Data[8 : 8+objLen] - typ := d.Data[9+objLen : 9+objLen+typLen] - - switch f { + switch pkt.Function { case nbpCtrlBrRq: - s.handleBrRq(d, p, r, obj, typ, zone, replyNet) + s.handleBrRq(d, p, r, pkt.Tuple.Object, pkt.Tuple.Type, pkt.Tuple.Zone, replyNet) case nbpCtrlFwd: - s.handleFwd(d, p, r, obj, typ, zone, replyNet) + s.handleFwd(d, p, r, pkt.Tuple.Object, pkt.Tuple.Type, pkt.Tuple.Zone, replyNet) case nbpCtrlLkUp: - s.handleLkUp(d, p, r, obj, typ, zone, replyNet) + s.handleLkUp(d, p, r, pkt.Tuple.Object, pkt.Tuple.Type, pkt.Tuple.Zone, replyNet) } } -func (s *NameInformationService) buildCommonPayload(d appletalk.Datagram, zone []byte, replyNet uint16) ([]byte, []byte) { +func (s *NameInformationService) buildCommonPayload(d ddp.Datagram, zone []byte, replyNet uint16) ([]byte, []byte) { objLen := int(d.Data[7]) typLen := int(d.Data[8+objLen]) @@ -202,7 +174,7 @@ func (s *NameInformationService) buildCommonPayload(d appletalk.Datagram, zone [ return lkup, fwd } -func (s *NameInformationService) handleBrRq(d appletalk.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { +func (s *NameInformationService) handleBrRq(d ddp.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { netlog.Debug("NBP BrRq on %s: obj=%q type=%q zone=%q reply=%d.%d.%d", p.ShortString(), obj, typ, zone, replyNet, d.Data[4], d.Data[5]) @@ -215,7 +187,7 @@ func (s *NameInformationService) handleBrRq(d appletalk.Datagram, p port.Port, r if nbpMatch(obj, n.Object) && nbpMatch(typ, n.Type) && nbpZoneMatch(zone, 
n.Zone) { rply := buildLkUpRply(nbpID, p.Network(), p.Node(), n.Socket, n.Object, n.Type, n.Zone) netlog.Debug("NBP BrRq: replying for registered name %q:%q@%q socket=%d", n.Object, n.Type, n.Zone, n.Socket) - _ = r.Route(appletalk.Datagram{ + _ = r.Route(ddp.Datagram{ DestinationNetwork: replyNet, DestinationNode: replyNode, DestinationSocket: replySock, @@ -249,7 +221,7 @@ func (s *NameInformationService) handleBrRq(d appletalk.Datagram, p port.Port, r if string(routeZone) == "*" { netlog.Debug("NBP BrRq: zone=* unresolved — broadcasting on %s", p.ShortString()) - p.Broadcast(appletalk.Datagram{ + p.Broadcast(ddp.Datagram{ DestinationNetwork: 0, SourceNetwork: p.Network(), DestinationNode: 0xFF, SourceNode: p.Node(), DestinationSocket: NBPSASSocket, SourceSocket: NBPSASSocket, DDPType: NBPDDPType, Data: lkup, }) @@ -269,13 +241,13 @@ func (s *NameInformationService) handleBrRq(d appletalk.Datagram, p port.Port, r seen[entry.Port] = struct{}{} if entry.Distance == 0 { netlog.Debug("NBP BrRq: sending LkUp to %s (network %d)", entry.Port.ShortString(), n) - entry.Port.Multicast(zone, appletalk.Datagram{ + entry.Port.Multicast(zone, ddp.Datagram{ DestinationNetwork: 0, SourceNetwork: entry.Port.Network(), DestinationNode: 0xFF, SourceNode: entry.Port.Node(), DestinationSocket: NBPSASSocket, SourceSocket: NBPSASSocket, DDPType: NBPDDPType, Data: lkup, }) } else { netlog.Debug("NBP BrRq: routing Fwd to network %d (distance %d)", entry.NetworkMin, entry.Distance) - _ = r.Route(appletalk.Datagram{ + _ = r.Route(ddp.Datagram{ DestinationNetwork: entry.NetworkMin, DestinationNode: 0x00, DestinationSocket: NBPSASSocket, SourceSocket: NBPSASSocket, DDPType: NBPDDPType, Data: fwd, }, true) @@ -284,7 +256,7 @@ func (s *NameInformationService) handleBrRq(d appletalk.Datagram, p port.Port, r } } -func (s *NameInformationService) handleFwd(d appletalk.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { +func (s *NameInformationService) handleFwd(d 
ddp.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { entry, _ := r.RoutingGetByNetwork(d.DestinationNetwork) if entry == nil || entry.Distance != 0 { return @@ -292,13 +264,13 @@ func (s *NameInformationService) handleFwd(d appletalk.Datagram, p port.Port, r lkup, _ := s.buildCommonPayload(d, zone, replyNet) - entry.Port.Multicast(zone, appletalk.Datagram{ + entry.Port.Multicast(zone, ddp.Datagram{ DestinationNetwork: 0, SourceNetwork: entry.Port.Network(), DestinationNode: 0xFF, SourceNode: entry.Port.Node(), DestinationSocket: NBPSASSocket, SourceSocket: NBPSASSocket, DDPType: NBPDDPType, Data: lkup, }) } -func (s *NameInformationService) handleLkUp(d appletalk.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { +func (s *NameInformationService) handleLkUp(d ddp.Datagram, p port.Port, r service.Router, obj, typ, zone []byte, replyNet uint16) { replyNode := d.Data[4] replySock := d.Data[5] nbpID := d.Data[1] @@ -318,7 +290,7 @@ func (s *NameInformationService) handleLkUp(d appletalk.Datagram, p port.Port, r for _, m := range matches { rply := buildLkUpRply(nbpID, p.Network(), p.Node(), m.Socket, m.Object, m.Type, m.Zone) netlog.Debug("NBP LkUp: replying with %q:%q@%q socket=%d", m.Object, m.Type, m.Zone, m.Socket) - _ = r.Route(appletalk.Datagram{ + _ = r.Route(ddp.Datagram{ DestinationNetwork: replyNet, DestinationNode: replyNode, DestinationSocket: replySock, diff --git a/service/zip/name_information_test.go b/service/zip/name_information_test.go index c548fe2..4490fe9 100644 --- a/service/zip/name_information_test.go +++ b/service/zip/name_information_test.go @@ -2,33 +2,33 @@ package zip import ( "bytes" + "context" "sync" "testing" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/internal/testutil" + "github.com/pgodw/omnitalk/protocol/ddp" + "github.com/pgodw/omnitalk/service" ) func newMockPort(network uint16, node uint8, 
shortString string, isExtended bool) *mockPort { - return &mockPort{ - networkFunc: func() uint16 { return network }, - nodeFunc: func() uint8 { return node }, - shortStringFunc: func() string { return shortString }, - extendedNetworkFunc: func() bool { return isExtended }, - broadcastFunc: func(datagram appletalk.Datagram) {}, - multicastFunc: func(zoneName []byte, datagram appletalk.Datagram) {}, - unicastFunc: func(network uint16, node uint8, datagram appletalk.Datagram) {}, - } + p := testutil.NewMockPort(network, node, shortString, isExtended) + p.BroadcastFunc = func(datagram ddp.Datagram) {} + p.MulticastFunc = func(zoneName []byte, datagram ddp.Datagram) {} + p.UnicastFunc = func(network uint16, node uint8, datagram ddp.Datagram) {} + return p } func newMockRouter() *mockRouter { - return &mockRouter{ - routeFunc: func(datagram appletalk.Datagram, originating bool) error { return nil }, - routingGetByNetworkFunc: func(network uint16) (*service.RouteEntry, *bool) { return nil, nil }, - zonesInNetworkRangeFunc: func(networkMin uint16, networkMax *uint16) ([][]byte, error) { return nil, nil }, - networksInZoneFunc: func(zoneName []byte) []uint16 { return nil }, - } + r := testutil.NewMockRouter() + r.RouteFunc = func(datagram ddp.Datagram, originating bool) error { return nil } + r.RoutingGetByNetworkFunc = func(network uint16) (*service.RouteEntry, *bool) { return nil, nil } + r.ZonesInNetworkRangeFunc = func(networkMin uint16, networkMax *uint16) ([][]byte, error) { + return nil, nil + } + r.NetworksInZoneFunc = func(zoneName []byte) []uint16 { return nil } + return r } func TestNameInformationService_BrRq(t *testing.T) { @@ -36,16 +36,16 @@ func TestNameInformationService_BrRq(t *testing.T) { r := newMockRouter() // Track routed packets - var routedPackets []appletalk.Datagram + var routedPackets []ddp.Datagram var mu sync.Mutex - r.routeFunc = func(datagram appletalk.Datagram, originating bool) error { + r.RouteFunc = func(datagram ddp.Datagram, 
originating bool) error { mu.Lock() routedPackets = append(routedPackets, datagram) mu.Unlock() return nil } - err := svc.Start(r) + err := svc.Start(context.Background(), r) if err != nil { t.Fatalf("Failed to start service: %v", err) } @@ -65,7 +65,7 @@ func TestNameInformationService_BrRq(t *testing.T) { 8, 'T', 'e', 's', 't', 'Z', 'o', 'n', 'e', } - d := appletalk.Datagram{ + d := ddp.Datagram{ DDPType: NBPDDPType, Data: data, } @@ -91,16 +91,16 @@ func TestNameInformationService_LkUp(t *testing.T) { r := newMockRouter() // Track routed packets - var routedPackets []appletalk.Datagram + var routedPackets []ddp.Datagram var mu sync.Mutex - r.routeFunc = func(datagram appletalk.Datagram, originating bool) error { + r.RouteFunc = func(datagram ddp.Datagram, originating bool) error { mu.Lock() routedPackets = append(routedPackets, datagram) mu.Unlock() return nil } - err := svc.Start(r) + err := svc.Start(context.Background(), r) if err != nil { t.Fatalf("Failed to start service: %v", err) } @@ -118,7 +118,7 @@ func TestNameInformationService_LkUp(t *testing.T) { 5, 'Z', 'o', 'n', 'e', '2', } - d := appletalk.Datagram{ + d := ddp.Datagram{ DDPType: NBPDDPType, Data: data, } @@ -143,16 +143,16 @@ func TestNameInformationService_LkUpZoneWildcard(t *testing.T) { svc := NewNameInformationService() r := newMockRouter() - var routedPackets []appletalk.Datagram + var routedPackets []ddp.Datagram var mu sync.Mutex - r.routeFunc = func(datagram appletalk.Datagram, originating bool) error { + r.RouteFunc = func(datagram ddp.Datagram, originating bool) error { mu.Lock() routedPackets = append(routedPackets, datagram) mu.Unlock() return nil } - err := svc.Start(r) + err := svc.Start(context.Background(), r) if err != nil { t.Fatalf("Failed to start service: %v", err) } @@ -170,7 +170,7 @@ func TestNameInformationService_LkUpZoneWildcard(t *testing.T) { 1, '*', } - d := appletalk.Datagram{DDPType: NBPDDPType, Data: data} + d := ddp.Datagram{DDPType: NBPDDPType, Data: data} 
svc.Inbound(d, p) time.Sleep(50 * time.Millisecond) @@ -194,17 +194,17 @@ func TestNameInformationService_Fwd(t *testing.T) { var multicastCalled bool var mu sync.Mutex - p.multicastFunc = func(zoneName []byte, datagram appletalk.Datagram) { + p.MulticastFunc = func(zoneName []byte, datagram ddp.Datagram) { mu.Lock() multicastCalled = true mu.Unlock() } - r.routingGetByNetworkFunc = func(network uint16) (*service.RouteEntry, *bool) { + r.RoutingGetByNetworkFunc = func(network uint16) (*service.RouteEntry, *bool) { return &service.RouteEntry{Distance: 0, Port: p}, nil } - err := svc.Start(r) + err := svc.Start(context.Background(), r) if err != nil { t.Fatalf("Failed to start service: %v", err) } @@ -217,7 +217,7 @@ func TestNameInformationService_Fwd(t *testing.T) { 5, 'Z', 'o', 'n', 'e', '3', } - d := appletalk.Datagram{ + d := ddp.Datagram{ DDPType: NBPDDPType, DestinationNetwork: 30, // Route matching Data: data, @@ -244,7 +244,7 @@ func TestNameInformationService_buildCommonPayload(t *testing.T) { 5, 'T', 'y', 'p', 'e', '1', 5, 'Z', 'o', 'n', 'e', '1', } - d := appletalk.Datagram{Data: data} + d := ddp.Datagram{Data: data} zone := []byte("Zone1") replyNet := uint16(10) @@ -279,7 +279,7 @@ func TestNameInformationService_handlePacket_invalidDDP(t *testing.T) { p := newMockPort(10, 15, "mock", false) // test invalid DDPType - d := appletalk.Datagram{ + d := ddp.Datagram{ DDPType: 99, Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, } @@ -287,7 +287,7 @@ func TestNameInformationService_handlePacket_invalidDDP(t *testing.T) { svc.handlePacket(d, p, r) // test length too short - d = appletalk.Datagram{ + d = ddp.Datagram{ DDPType: NBPDDPType, Data: []byte{0, 0, 0}, } diff --git a/service/zip/responding.go b/service/zip/responding.go index 14e3af3..03513a7 100644 --- a/service/zip/responding.go +++ b/service/zip/responding.go @@ -2,27 +2,32 @@ package zip import ( "bytes" + "context" "encoding/binary" + "sync" - "github.com/pgodw/omnitalk/go/appletalk" - 
"github.com/pgodw/omnitalk/go/netlog" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/pkg/encoding" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/netlog" + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) type RespondingService struct { ch chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port } stop chan struct{} pendingExtReply map[uint16]map[string]struct{} // network_min -> set of zone names + wg sync.WaitGroup } func NewRespondingService() *RespondingService { return &RespondingService{ ch: make(chan struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }, 256), stop: make(chan struct{}), @@ -35,10 +40,14 @@ type multicastAddresser interface { MulticastAddress(zoneName []byte) []byte } -func (s *RespondingService) Start(r service.Router) error { +func (s *RespondingService) Start(ctx context.Context, r service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() for { select { + case <-ctx.Done(): + return case <-s.stop: return case item := <-s.ch: @@ -85,11 +94,15 @@ func (s *RespondingService) Start(r service.Router) error { return nil } -func (s *RespondingService) Stop() error { close(s.stop); return nil } -func (s *RespondingService) Inbound(d appletalk.Datagram, p port.Port) { +func (s *RespondingService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} +func (s *RespondingService) Inbound(d ddp.Datagram, p port.Port) { select { case s.ch <- struct { - d appletalk.Datagram + d ddp.Datagram p port.Port }{d: d, p: p}: default: @@ -97,7 +110,7 @@ func (s *RespondingService) Inbound(d appletalk.Datagram, p port.Port) { } // handleReply processes ZIP_FUNC_REPLY: immediately commit each (network, zone) tuple. 
-func (s *RespondingService) handleReply(r service.Router, d appletalk.Datagram, _ bool) { +func (s *RespondingService) handleReply(r service.Router, d ddp.Datagram, _ bool) { data := d.Data[2:] for len(data) >= 3 { nmin := binary.BigEndian.Uint16(data[0:2]) @@ -124,7 +137,7 @@ func (s *RespondingService) handleReply(r service.Router, d appletalk.Datagram, // handleExtReply processes ZIP_FUNC_EXT_REPLY: accumulate tuples until we have the // expected count before committing. -func (s *RespondingService) handleExtReply(r service.Router, d appletalk.Datagram) { +func (s *RespondingService) handleExtReply(r service.Router, d ddp.Datagram) { if len(d.Data) < 2 { return } @@ -167,7 +180,7 @@ func (s *RespondingService) handleExtReply(r service.Router, d appletalk.Datagra } // handleQuery responds to ZIP_FUNC_QUERY. -func handleQuery(r service.Router, d appletalk.Datagram, rx port.Port) { +func handleQuery(r service.Router, d ddp.Datagram, rx port.Port) { if len(d.Data) < 2 { return } @@ -192,7 +205,7 @@ func handleQuery(r service.Router, d appletalk.Datagram, rx port.Port) { binary.BigEndian.PutUint16(item[0:2], entry.NetworkMin) item[2] = byte(len(z)) copy(item[3:], z) - if len(buf)+len(item) > appletalk.MaxDataLength { + if len(buf)+len(item) > ddp.MaxDataLength { r.Reply(d, rx, DDPType, buf) buf = []byte{FuncExtReply, byte(len(zones))} } @@ -205,7 +218,7 @@ func handleQuery(r service.Router, d appletalk.Datagram, rx port.Port) { } // handleGetNetInfo responds to ZIP_FUNC_GETNETINFO_REQUEST. -func handleGetNetInfo(r service.Router, d appletalk.Datagram, rx port.Port) { +func handleGetNetInfo(r service.Router, d ddp.Datagram, rx port.Port) { if rx.Network() == 0 || rx.NetworkMin() == 0 || rx.NetworkMax() == 0 { return } @@ -274,7 +287,7 @@ func handleGetNetInfo(r service.Router, d appletalk.Datagram, rx port.Port) { } // handleGetMyZone responds to ATP GetMyZone. 
-func handleGetMyZone(r service.Router, d appletalk.Datagram, rx port.Port) { +func handleGetMyZone(r service.Router, d ddp.Datagram, rx port.Port) { tid := binary.BigEndian.Uint16(d.Data[2:4]) entry, _ := r.RoutingGetByNetwork(d.SourceNetwork) if entry == nil { @@ -295,7 +308,7 @@ func handleGetMyZone(r service.Router, d appletalk.Datagram, rx port.Port) { } // handleGetZoneList responds to ATP GetZoneList / GetLocalZones. -func handleGetZoneList(r service.Router, d appletalk.Datagram, rx port.Port, local bool) { +func handleGetZoneList(r service.Router, d ddp.Datagram, rx port.Port, local bool) { tid := binary.BigEndian.Uint16(d.Data[2:4]) startIndex := int(binary.BigEndian.Uint16(d.Data[6:8])) // 1-relative @@ -327,7 +340,7 @@ func handleGetZoneList(r service.Router, d appletalk.Datagram, rx port.Port, loc numZones := 0 const atpHdrLen = 8 for i, zone := range zones { - if atpHdrLen+len(zoneList)+1+len(zone) > appletalk.MaxDataLength { + if atpHdrLen+len(zoneList)+1+len(zone) > ddp.MaxDataLength { break } zoneList = append(zoneList, byte(len(zone))) @@ -348,5 +361,5 @@ func handleGetZoneList(r service.Router, d appletalk.Datagram, rx port.Port, loc // toUCase uses the centralized MacRoman case-fold from the appletalk package. 
func toUCase(input []byte) []byte { - return appletalk.MacRomanToUpper(input) + return encoding.MacRomanToUpper(input) } diff --git a/service/zip/sending.go b/service/zip/sending.go index 33ea4e6..542f32a 100644 --- a/service/zip/sending.go +++ b/service/zip/sending.go @@ -1,28 +1,36 @@ package zip import ( + "context" + "sync" "time" - "github.com/pgodw/omnitalk/go/appletalk" - "github.com/pgodw/omnitalk/go/port" - "github.com/pgodw/omnitalk/go/service" + "github.com/pgodw/omnitalk/protocol/ddp" + + "github.com/pgodw/omnitalk/port" + "github.com/pgodw/omnitalk/service" ) type SendingService struct { timeout time.Duration stop chan struct{} + wg sync.WaitGroup } func NewSendingService() *SendingService { return &SendingService{timeout: 10 * time.Second, stop: make(chan struct{})} } -func (s *SendingService) Start(r service.Router) error { +func (s *SendingService) Start(ctx context.Context, r service.Router) error { + s.wg.Add(1) go func() { + defer s.wg.Done() t := time.NewTicker(s.timeout) defer t.Stop() for { select { + case <-ctx.Done(): + return case <-s.stop: return case <-t.C: @@ -37,12 +45,12 @@ func (s *SendingService) Start(r service.Router) error { } data := []byte{FuncQuery, 1, byte(e.NetworkMin >> 8), byte(e.NetworkMin)} if e.Distance == 0 { - e.Port.Broadcast(appletalk.Datagram{ + e.Port.Broadcast(ddp.Datagram{ DestinationNetwork: 0, SourceNetwork: e.Port.Network(), DestinationNode: 0xFF, SourceNode: e.Port.Node(), DestinationSocket: SAS, SourceSocket: SAS, DDPType: DDPType, Data: data, }) } else { - e.Port.Unicast(e.NextNetwork, e.NextNode, appletalk.Datagram{ + e.Port.Unicast(e.NextNetwork, e.NextNode, ddp.Datagram{ DestinationNetwork: e.NextNetwork, SourceNetwork: e.Port.Network(), DestinationNode: e.NextNode, SourceNode: e.Port.Node(), DestinationSocket: SAS, SourceSocket: SAS, DDPType: DDPType, Data: data, }) @@ -54,5 +62,10 @@ func (s *SendingService) Start(r service.Router) error { return nil } -func (s *SendingService) Stop() error { 
close(s.stop); return nil } -func (s *SendingService) Inbound(_ appletalk.Datagram, _ port.Port) {} +func (s *SendingService) Stop() error { + close(s.stop) + s.wg.Wait() + return nil +} + +func (s *SendingService) Inbound(_ ddp.Datagram, _ port.Port) {} diff --git a/service/zip/zip.go b/service/zip/zip.go index 8e337fb..5bc1787 100644 --- a/service/zip/zip.go +++ b/service/zip/zip.go @@ -1,23 +1,26 @@ package zip +import pzip "github.com/pgodw/omnitalk/protocol/zip" + +// Wire constants re-exported from protocol/zip. const ( - SAS = 6 - DDPType = 6 - FuncQuery = 1 - FuncReply = 2 - FuncGetNetInfoReq = 5 - FuncGetNetInfoRep = 6 - FuncExtReply = 8 + SAS = pzip.SAS + DDPType = pzip.DDPType + FuncQuery = pzip.FuncQuery + FuncReply = pzip.FuncReply + FuncGetNetInfoReq = pzip.FuncGetNetInfoReq + FuncGetNetInfoRep = pzip.FuncGetNetInfoRep + FuncExtReply = pzip.FuncExtReply - GetNetInfoZoneInvalid = 0x80 - GetNetInfoUseBroadcast = 0x40 - GetNetInfoOnlyOneZone = 0x20 + GetNetInfoZoneInvalid = pzip.GetNetInfoZoneInvalid + GetNetInfoUseBroadcast = pzip.GetNetInfoUseBroadcast + GetNetInfoOnlyOneZone = pzip.GetNetInfoOnlyOneZone - ATPDDPType = 3 - ATPFuncTReq = 0x40 - ATPFuncTResp = 0x80 - ATPEOM = 0x10 - ATPGetMyZone = 7 - ATPGetZoneList = 8 - ATPGetLocalZoneList = 9 + ATPDDPType = pzip.ATPDDPType + ATPFuncTReq = pzip.ATPFuncTReq + ATPFuncTResp = pzip.ATPFuncTResp + ATPEOM = pzip.ATPEOM + ATPGetMyZone = pzip.ATPGetMyZone + ATPGetZoneList = pzip.ATPGetZoneList + ATPGetLocalZoneList = pzip.ATPGetLocalZoneList )