diff --git a/.air.windows.toml b/.air.windows.toml
deleted file mode 100644
index cde5de4d..00000000
--- a/.air.windows.toml
+++ /dev/null
@@ -1,67 +0,0 @@
-# .air.windows.toml
-#
-# A Windows specific config file for Air, the live reload server for Go apps.
-# https://github.com/cosmtrek/air
-#
-# $ go install github.com/cosmtrek/air@latest
-# $ air -c .air.windows.toml
-
-# Working directory
-# . or absolute path, please note that the directories following must be under root.
-root = "."
-tmp_dir = "tmp"
-
-[build]
-# Just plain old shell command. You could use `make` as well.
-cmd = "go build -o ./tmp/main.exe ."
-# Binary file yields from `cmd`.
-bin = "tmp\\main.exe"
-# Customize binary, can setup environment variables when run your app.
-full_bin = ""
-
-# Watch these filename extensions.
-include_ext = ["go", "tpl", "tmpl", "html"]
-# Ignore these filename extensions or directories.
-exclude_dir = ["dist", "tmp", "vendor", ".git"]
-# Watch these directories if you specified.
-include_dir = []
-# Watch these files.
-include_file = []
-# Exclude files.
-exclude_file = []
-# Exclude specific regular expressions.
-exclude_regex = ["_test\\.go"]
-# Exclude unchanged files.
-exclude_unchanged = true
-# Follow symlink for directories
-follow_symlink = true
-
-# This log file places in your tmp_dir.
-log = "air.log"
-# It's not necessary to trigger build each time file changes if it's too frequent.
-delay = 500 # ms
-# Stop running old binary when build errors occur.
-stop_on_error = true
-# Send Interrupt signal before killing process (windows does not support this feature)
-send_interrupt = false
-# Delay after sending Interrupt signal
-kill_delay = 500 # ms
-# Rerun binary or not
-rerun = false
-# Delay after each executions
-rerun_delay = 500
-
-[log]
-# Show log time
-time = false
-
-[color]
-# Customize each part's color. If no color found, use the raw app log.
-main = "magenta"
-watcher = "cyan"
-build = "yellow"
-runner = "green"
-
-[misc]
-# Delete tmp directory on exit
-clean_on_exit = true
diff --git a/.vscode/settings.json b/.vscode/settings.json
index eed09832..3f5b0cc0 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -10,9 +10,10 @@
},
"editor.formatOnSave": true,
"files.associations": {
+ "*.env.local": "ini",
"*.gohtml": "html",
- "*.tmpl": "html",
- "*.env.local": "ini"
+ "*.service": "ini",
+ "*.tmpl": "html"
},
"files.eol": "\n",
"runOnSave.statusMessageTimeout": 3000,
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Steve Wozniak and Steve Jobs release the Apple I, a single-board computer with a " +
+ Content: " Steve Wozniak and Steve Jobs released The Apple Computer, later rebranded as the Apple I. It was a single-board device for electronic hobbyists with a " +
"MOS 6502 CPU, 4KB of RAM, and a 40-column display controller. Unlike the more popular and earlier Altair 8800, the Apple Computer wasn't usable out of the box and didn't come with a case. However, it did offer a convenient video terminal, cassette, and keyboard interface, requiring owners to supply peripherals for output, storage, and input." +
- " The choice of the new, powerful, and affordable MOS 6502 CPU showed foresight, as it later became the basis of far more successful microcomputer and consoles. " +
+ " Unlike the far more popular Altair 8800, The Apple Computer wasn't usable out of the box and didn't come with a case. However, it did offer a convenient video terminal, cassette, and keyboard interface, but requires owners to supply peripherals for input, output, and storage. The board is a commercial failure, selling less than 200 units, and could be considered more of a prototype for the company and third-party investors. The following year, the product line was replaced with circuit boards housing an Apple II. The choice of the new MOS 6502 CPU showed foresight, as it became the foundation of many successful microcomputers and consoles. " +
ul0 +
"
IBM and Microsoft's later PC-DOS / MS-DOS took a lot of inspiration[1] from CP/M and supplanted " + - "it as the dominant, open hardware, microcomputing operating system.
" + + "IBM's PC-DOS and later, Microsoft's MS-DOS took a lot of inspiration from CP/M[1], " + + "with DOS supplanting it as the dominant, open hardware, microcomputing operating system.
" + sect0 + - "The Apple II, Commodore PET and TRS-80 are released, the first microcomputers to be readily available to the public. " + - "By the end of the year, a potential customer in the USA could walk into a mall or retail shop and walk out with a complete personal computer, ready to use.
" + - "The MOS 6502 CPU 1975 is found in the Commodore PET[1] and the Apple II.
" +
- "While Zilog Z-80 1976 is in use with the TRS-80[2].
The Commodore PET, Apple II, and the Tandy TRS-80 " + + "became the first successful microcomputers marketed to mainstream consumers rather than electronics hobbyists. " + + "By the end of the year, a potential customer in the USA could walk into a mall or specialist retail shop and walk out with a complete personal computer ready to use. However, in 1977, things began slowly for Commodore and Apple.
" + + "Commodore PET Personal Electronic TransactorCommodore was the first to announce its machine in January at CES, but shipping only occurred in mid-October. Even then, the numbers were tiny, with the end-of-year batches reaching just 500 boxed machines.
" + + "Apple IIApple didn't fare much better, as its revenue until the end of September 1977 was just USD 774,000, which includes sales of both the Apple I and the mid-April launch of the Apple II. " +
+ "Its December 1980 stock perspective states, Net sales in fiscal 1977 occurred primarily in the fourth fiscal quarter and consisted principally of sales of the basic Apple II mainframe computer.
" +
+ "Given the expensive Apple II is priced at $1300-2600, the number of machines sold could have been in the hundreds.
The Tandy fared considerably better. It was announced at the end of July and priced from $400 or $500, including a display. " + + "It was widely available nationally through the thousands of RadioShack retail stores, and took 10,000 unit orders in the first month, birthing the microcomputer revolution!
" + + "CPUsThe MOS 6502 CPU 1975 is found in the Commodore PET and the Apple II.
" +
+ "The Zilog Z-80 1976 is in use with the TRS-80.
In the early days of the BBS, the mainstream computer press paid attention to boards, " + "including write-ups" + - "[2] and listings of the phone numbers for known underground boards." + + "[2] and listings of the phone numbers for known underground boards.
" + // Sherwood Forest + "Sherwood ForestA very early, underground ABBS is the 1979-1981 New Jersey-based[3] board, Sherwood Forest, created by Magnetic Surfer. " + - "It runs off a floppy disc and a Micromodem and became a hub for some active telephone hackers who were early adopters of microcomputers in the New York Tri-state area—many became Scene pirates and notorious computer phreakers and hackers." + + "It runs off a floppy disc and a Micromodem and became a hub for some active telephone hackers who were early adopters of microcomputers in the New York Tri-state area—many became Scene pirates and notorious computer phreakers and hackers.
" + // Modem over Manhattan - "As its name suggests, MOM, or Modem Over Manhattan (+212-245-4363), was based in Manhattan, New York, and probably went online in 1980. " +
+ "Modem Over Manhattan
" +
+ "
As its name suggests, MOM, or Modem Over Manhattan (+212-245-4363, +212-912-9141), was based in Manhattan, New York, and probably went online in 1980. " + "It is another famous open board with lax rules that was popular with the New York phreak community.
" + // Pirate Trek + "Pirate-TrekA very early pirate board, the original Pirate-Trek out of New York (+914-634-1268), possibly run by the famed Apple II cracker Krakowicz, " + "was first announced in 1981.
" + // 8BBS + "8BBSThere is also the renowned 8BBS out of San Jose, CA, which ran on a PDP-8 minicomputer " + "in 1980-82 and has a separate article.
" + sect0 + @@ -299,21 +311,32 @@ func Collection() Milestones { }, { Title: "The first crackers", Year: 1979, Highlight: true, - Lead: "?", - Content: "We have yet to learn when or who started cracking, but it must have been after disk copy protection became common on Apple II software. " + - "Andrew McFadden has written about early copy protection on specific Apple II games released on cassette tapes in 1978 and 1979, but these were probably unusual.
" + + Content: "We have yet to learn when or who started cracking, but it must have been after discovering disk copy protection in Apple II software. " + + "Andrew McFadden wrote about early copy protection on cassette tapes. " + + "This form of copy protection was uncommon, but the games include Microchess 2 from Personal Software, Module 6 from Softape in 1978, and 1979's Sargon II from Hayden.
" + // disk ii drive "However, the July 1978 retail debut of the Disk II floppy drive with the first " + "Apple operating system was a significant point. " + - "For the moneyed Apple II hobbyists, the drive and software became a must-have piece of kit that significantly improved the functionality of their machines and quickly caught on.
" + + "For the moneyed Apple II hobbyists, the drive and software became a must-have piece of kit that significantly improved the functionality of their machines and quickly caught on. " + // disk copy protection - "The drive offered new benefits for software developers, including speed and reliability and complete control of the floppy drive hardware using software that the developers could write themselves. " + + "The drive offered new benefits for software developers, including speed and reliability and complete control of the floppy drive hardware using software that the developers could write themselves. " + "This ability encouraged them to embed disk copy protection methods into software that are " + "still problematic for computer historians today!
" + + // yahtzee + "A computerized version of the popular board game Yahtzee was completed in April 1978 and published by Apple Computer. " +
+ "The original media seems lost, but the surviving digital image has been noted as being cracked
due to its loader message, Yahtzee - for the moose!
. " +
+ "But is the modification a copy protection crack or simply a note to a friend written years after the publish date?
Dunjonquest Temple of Apshai from Automated Simulations could be one of the oldest titles with disk copy protection. " + + "However, the game has been reprinted a few times under the Epyx branding, which complicates things. " + + "The second reprint from 1980 included a title screen and possibly disk copy protection, but the first edition with a (c) 1979 Automated Simulations notice seems free of copy protection? " + + "It is also unsure " + + "if the first Apple edition was available in 1979 or more likely, later in 1980.
" + // unbroken quote + "A December 1980 the post on 8BBS from Brain Litzinger[1] includes," +
- "I also have unbroken: Galaxion, Dogfight, Hi-res shootout, and Astro-Apple
.
The casual use of unbroken in the post indicates that knowledge of cracking or removing disk copy protection was already commonplace, at least among the online, underground communities.
" + + "I also have unbroken: Galaxion, Dogfight, Hi-res shootout, and Astro-Apple. " + + "The casual use of unbroken in the post indicates that knowledge of cracking or removing disk copy protection was already commonplace, at least among the online, underground communities." + // lock smith ad. "
In Christmas 1980, Omega Software Systems was advertising Lock Smith, " +
"a disk copy program that makes a bit-by-bit copy, claiming duplication of just about any disk is possible.
The advertising suggests that disk copy protection was already problematic for Apple II owners who desired software backups and that there was a product market. " +
@@ -329,58 +352,70 @@ func Collection() Milestones {
sect1,
},
{
- Title: "The birth of warez", Year: 1980, Highlight: true,
- Lead: "The Apple II ?", Link: "http://artscene.textfiles.com/intros/APPLEII/", LinkTitle: "and browse the Apple II crack screens",
- Content: "
When was the birth of Wares?[1] There's no clear answer to this question, but a good guess would be sometime in 1980 within the established technology hubs of the USA. " + - "By then, microcomputer owners exchanged real-life details to meet up, duplicate and exchange software collections, and find ways to remove copy protections.
" + + Title: "The birth of wares", Year: 1980, Highlight: true, + Lead: "The Apple II", Link: "http://artscene.textfiles.com/intros/APPLEII/", LinkTitle: "and browse the Apple II crack screens", + Content: // kids with micros + "Without good software[2], the expensive microcomputers of the era were but mostly pointless machines. " + + "Getting them online with modems was challenging[5]. " + + "So understandably, the computer owners who were into microcomputing would befriend like-minded people to exchange information and share software.
" + // apple modems - "1979-1980 saw the sale of the first Apple II modem peripherals, the Hayes Micromodem II and the Novation CAT. " + - "These modem additions enabled microcomputer owners to connect to electronic message boards, communicate, and even exchange files remotely using the telephone.
" + + "1979 saw the sale of the first Apple II modem peripheral, the Hayes Micromodem II and later, the Novation CAT. " + + "These modems and the development of usable modem software such as ASCII Express in 1980, enabled Apple owners to connect to electronic message boards, communicate, and even exchange files remotely using the telephone.
" + // telephone costs - "One problem with the telephone was the cost; explicitly making calls outside the caller's local area was charged by the minute. " + - "So, combining a slow microcomputer with an even slower modem communication device often led to a costly phone bill. But long-distance " + - "phone phreaking had been a well-established underground movement, " + - " allowing callers to trick the phone network into misbilling or giving away long-distance phone calls.
" + - // kids with micros - "Microcomputers of the era were expensive, exotic, but mostly useless machines[2]. " + - "Getting them online with modems was challenging. " + - "So understandably, the kids who already phone phreaked and were into microcomputing would befriend like-minded people who shared these niche pursuits and then collaborate to exchange software, information, and ideas.
" + + "One problem with telephones was that the expense of making calls outside the caller's local area was charged by the minute. " + + "So, combining a slow microcomputer with an even slower modem on the phone network often led to a prohibitively costly phone bill. But " + + "phone phreaking had been a well-established, anti-corporate movement, " + + " allowing callers to trick a phone company into misbilling or giving away expensive, long-distance phone calls.
" + + // birth of warez + "So when was the birth of wares[1] and a Warez scene? " + + "There's no exact answer, but a good guess would be sometime in 1980 in the USA, maybe in the New York tri-state area, Massachusetts, California, or elsewhere. " + + "By then, microcomputer owners exchanged details to meet in real life and online to duplicate and exchange software collections. And, importantly, to find ways to remove Apple II disk copy protections and show off the results. " + // warez dating - "
Regarding Warez, some pirates on the Apple II were dating their cracks[3] in early 1982, " + - "but many modified, tagged crack screens exist for games published in 1980[4] and 1981. " + - "While a copyright year doesn't always mean the game crack is from the same period, it is a fair assumption.
" + + "The pirates, also often identified as phone phreaks, removed or cracked disk copy protection on the Apple II and were dating their activity towards the end of 1980[4] and in 1981. " + + "Likewise, many modified,cracked, or
brokeningame title screens exist for games published in those years." + // other platforms "
As for the other microcomputer platforms, the far more popular " + - "TRS-80 from Radio Shack had a modem peripheral available at the end of 1978. " + - "However, there is no evidence of an underground culture developing on this machine. A modem didn't exist on the " + + "TRS-80 from Tandy had a modem peripheral available at the end of 1978. " + + "However, there is no evidence of an underground culture developing on the machine. A modem didn't exist on the " + "Atari 400/800 until 1981, and the famous Commodore 64 was years away.
" + sect0 + "safter the dictionary spelling.
killer appfor the Apple II, VisiCalc," + - " the first spreadsheet for microcomputers, was only released in the last few months of 1979.
killer app, was only published in the last few months of 1979.
(C) CRACKED 1982crack screen.
The Untouchables - " +
- "The Apple Mafia - " +
- "The Dirty Dozen
[1][2][3]
In 1986, Red Ghost posted The Apple Mafia Story, claiming these were some of the first-ever pirate groups. " +
- "He grew up in Queens, New York, and suggests that is where many original
phreakers and pirates originated. " +
- "But we know that statement is incorrect, as in the 1970s, people nationwide were already phone freaking.
Various discussions on groups from the Apple II era suggest they existed in 1981 or even 1980. " + + "Yet, from the irregular cracked Scene releases that exist online today, the earliest groups only have releases from 1982 onwards. " + + "While there are many 1980 and 1981 cracks, the surviving evidence says they all were released from individuals rather than collectives.
" + + "Famed groups, Super Pirates of Minneapolis, The Apple Mafia, The Software Pirates, Digital Gang, The Dirty Dozen, Untouchables, and Apple Pirated Program Library Exchange all have releases for games published in 1982.
" + + "The Apple Mafia
" +
+ "In 1986, Red Ghost posted The Apple Mafia Story, claiming " +
+ "The Untouchables[1], The Apple Mafia[2], and The Dirty Dozen[3] " +
+ "were some of the first-ever pirate groups. But he admits he wasn't there and wasn't even into computers then. He grew up in Queens, New York, and suggests that is where many original
phreakers and pirates originated. " +
+ "But we know in the 1970s, people nationwide were already phone freaking, and the pirate groups mentioned hit their stride in 1982-83.
In the same post, an early 1984 quote from The Godfather states he founded The Apple Mafia in 1980, initially as a joke, but it became a more serious project in 1981.
" + - "In the same post, an early 1984 quote from The Godfather states he founded The Apple Mafia in 1980, initially as a joke, but it became a more serious project in 1981. Strangely, Godfather states that it is the oldest active group rather than simply the oldest group. " +
+ "BRIEF HISTORY OF THE APPLE MAFIA. FOUNDED IN 1980 BY THE GODFATHER AS A JOKE. REDONE IN 1981 AS A SEMI SERIOUS GROUP. " +
+ "
BRIEF HISTORY OF THE APPLE MAFIA. FOUNDED IN 1980 BY THE GODFATHER AS A JOKE. REDONE IN 1981 AS A SEMI SERIOUS GROUP. " +
"KICKED SOME ASS IN '82. BLEW EVERYONE AWAY IN 83, AND WILL DO MUCH BETTER IN 84. SINCE THE BEGINNING THE GROUP HAS DIED OUT AND BEEN REBORN SEVERAL TIMES, THIS TIME LETS KEEP IT GOING. " +
- "IS CURRENTLY THE OLDEST ACTIVE GROUP
[7]
Phrack Magazine issue 42 has a 1993 interview with Lord Digital, who attempts to clarify the Apple Mafia founding." +
" I played around with various things, ... until " +
@@ -388,16 +423,19 @@ func Collection() Milestones {
"starting to get into computers, most of them comprising the main attendees of " +
"the soon-to-be-defunct TAP[4] meetings in NYC, a pretty eclectic collection of " +
"dudes who have long since gone their separate ways to meet with whatever " +
- "destinies life had in store for them.
" +
+ "Around 1980[5] there was an Apple Fest that " +
+ "destinies life had in store for them. Around 1980 there was an Apple Fest that " +
"we went to, and found even more people with Apples and, from this, formed the " +
"Apple Mafia, which was, in our minds, really cool sounding and actually became " +
"the first WAreZ gRoUP to exist for the Apple II.
However, the first AppleFest was held in Boston on the weekend of June 6-7, 1981[5]. " + + "Given the inconsistencies in the various stories about The Apple Mafia, it is safe to suggest that they were an early group from late 1981.
" + // super pirates "Super Pirates of Minneapolis[6]" +
"
The Super Pirates were a famous, early group outside of New York. " +
- "Below is an uncredited quote from the Red-Sector-A BBS log file from 1987. " +
- "The year might be misremembered. However, it suggests the Super Pirates were around in 1980, the same year the game Cyber Strike was published. " +
- "
The 1st ware I got was back in 1980. It was Cyber Strike. Along with about 35 other disks, most cracked by the Super Pirates!
The 1st ware I got was back in 1980. It was Cyber Strike. Along with about 35 other disks, most cracked by the Super Pirates!
" +
+ "The quote is from Pirate History by The Incognito reposted on the Red Sector A BBS (313) 591-1024 and found in the Board Simulations 2 text from 1987.
Anecdotal evidence suggests the Super Pirates were involved in the first-ever BBS bust. The members left to form or joined the Midwest Pirate's Guild, " + "a group strongly associated with the cracker Apple Bandit and his Minneapolis-based board, The Safehouse (+612-724-7066).
" + @@ -413,18 +451,17 @@ func Collection() Milestones { div1 + "For the first time ever, a computer show devoted exclusively to the Apple computers. Applefest '81advert in the April 1981 issue of Washington Apple Pi.
active grouprather than just the oldest group.
In San Jose, CA, 8BBS (+408-296-5799) came online in March 1980. It is one of the first electronic message boards," + - " which early microcomputer hobbyists used, including posts by some early hackers, pirates, and named-drop phreaker personalities of the era[6]. " + + " which early microcomputer hobbyists used, including posts by some early hackers, pirates, and named-drop phreaker personalities of the era[1]. " + // message logs "But what stands out about the board today, we have surviving, thousands of posts from the earliest open online community that anyone in 1980 with the proper hardware could access from home. " + "These posts existed before Reddit, the web, Usenet, and the Internet.
" + @@ -439,7 +476,7 @@ func Collection() Milestones { "The earliest-dated crack is probably on the Apple II. An example is " +
+ "Cyber Strike broken by The Tornato
in November 1980 and published by Sirius Software. " +
+ "The static crack credit and text art is loaded at the start of the game before the game's title screen.
Signed as an amendment to law by President Jimmy Carter, computer programs are defined by copyright law and enable authors to control the copying, selling, and leasing of their software.
" + "But the law was confusing as software documentation and software source code are protected, but the object code or the compiled software that ran on the computer hardware is probably not.
", }, + { + Title: "The earliest cracktro", Year: 1981, Highlight: true, + Lead: "STARBLASTER cracked by: Mr. Xerox, from 1981 ?", + Content: "A cracktro or crack-intro definition should be an introduction advertising the crackers of a pirated software release. " + + "So,broken bytexts and hacked game title screens probably do not apply to this example." + + // apple ii + "
Unfortunately, it is challenging to date early pirated releases for the PC, Commodore 64, or Apple II. " +
+ "Many crackers didn't date their releases, and the systems themselves didn't track time or stamp the files. " +
+ "But given the proliferation of broken by
texts and graphic hacks in 1980, 1981 and 1982 on the Apple II in the USA, the early cracktro probably evolved here.
The prolific, early Apple cracker Mr. Xerox probably created one of the first introductions and scrollers in his animated " + + "crack by introduction " + + "for Star Blaster (c) 1981, which you can compare to the original opening.
" + + // others + "Or cracker Copycatter may have created the first scroller in a release of Pro Football (c) 1982.
" + + "While younger, the February 1984 Black Belt release is from The Apple Mafia is a candidate for an early crack-intro, given it is animated, timestamp and from a well-known group.
" + + "Penqueriel Mazes by Electronic Dimension initially looked like a candidate, but the intro-loader effects are far too modern for the (c) 1982
notice.
Microsoft Adventure is an IBM PC port of the text game Colossal Cave Adventure.
" + "Adventure was a highly influential and popular text-only adventuring game of exploration and puzzle solving for mainframe computers of the 1970s. " + @@ -536,7 +621,7 @@ func Collection() Milestones { }, { Title: "The first demo", Year: 1981, Month: 12, Highlight: true, - Lead: "Untitled Christmas greeting ?", + Lead: "Merry Christmas CB'81 ?", LinkTitle: "the Demozoo entry with a YouTube link", Link: "https://demozoo.org/productions/144652/", Content: "
The earliest known demo or demonstration program is probably this great but untitled animated Christmas greeting created on the Atari 400 or 800 and signed as CB'81
. " +
@@ -554,31 +639,6 @@ func Collection() Milestones {
Png: "cb-81.png",
},
},
- {
- Title: "Earliest dated crack", Year: 1982, Month: 3,
- Lead: "Dung Beetles breakage by Black Bart ?",
- LinkTitle: "and view the Dung Beetle crack", Link: "http://artscene.textfiles.com/intros/APPLEII/dungbeetles.gif",
- Content: "The earliest dated or timestamped crack is probably on the Apple II, " +
- "Dung Beetles broken by Black Bart in March 1982
. " +
- "
A screenshot of an earlier serial key exists, Broken by The Pirate 09/26/81, " + - "apparently for Crush Crumble & Chomp, but the capture lacks context.
" + - "Broken, unprotected or cracked were common verbs used in this era to describe the removal of disk copy protection.
" + - "Other early dated cracks include" + - ul0 + - "broken bytexts or static crack screen images do not count." + - // apple ii - "
Unfortunately, it is challenging to date early pirated releases for the PC, Commodore 64, or Apple II. Many crackers didn't date their releases, and the systems themselves didn't track time or stamp the files. " +
- "But given the proliferation of broken by
texts and graphic hacks in 1980-81 and 1982 on the Apple II in the USA, the early crack intro probably evolved here.
The prolific, early Apple cracker Mr. Xerox probably created one of the first intro and scrollers in his crack screen " + - "for Star Blaster (c) 1981, which you can compare to the original opening.
" + - // others - "Or cracker Copycatter may have created the first scroller in a release of Pro Football (c) 1982. " +
- "While younger, the February 1984 Black Belt release is from The Apple Mafia is a candidate for an early intro, given it has a timestamp and from a well-known group. " +
- "Penqueriel Mazes by Electronic Dimension initially looked like a candidate, but the intro-loader effects are far too modern for the (c) 1982
notice.
Many long argued in the Demoscene that a 1982
" +
"Berlin Cracking Service image [1][2] of the Berlin Bear was the first cracktro. " +
"But this seems far-fetched, and anecdotal proof suggests it originates from 1984.
But even taking the claim at its face value, back in late 1982 and selling at $595[14], the Commodore 64 was a pricey machine that targeted business users in the USA and Japan. " + + "
But even taking the claim at its face value, back in late 1982 and selling at $595[14], the Commodore 64 was a pricey machine that targeted business users in the USA and Japan. " + "Due to last-minute design changes and poor quality assurance issues, the machine had limited distribution and software that year. [3][4]
" + // germany and uk "By all accounts, the Commodore Braunschweig factory didn't have the European PAL Commodore 64 machines " + @@ -634,21 +674,19 @@ func Collection() Milestones { "
Interface adaptors will allow the use of a complete range of hardware peripherals including disk units, plotter, dot matrix and daisy wheel printers, Prestel communications, networking and much, much more.
" +
"A complete range of business software including word processing, information handling, financial modelling, accounting and many more specific application packages will be available.
West Berlin was an isolated city deep within the Soviet-controlled Deutsche Demokratische Republik, and its economy depended on mass subsidies from the West German Federal Republic. " + - "It is unlikely that several kids from here had early access to the European PAL Commodore 64 in 1982. " + - "It is more believable that the kids formed these Berlin cracking groups a year or so later, in late 1983 or 1984, " + + "
West Berlin was an isolated city deep within the Soviet-controlled German Democratic Republic (East Germany), and its economy depended on mass subsidies from the West German Federal Republic. " + + "It is unlikely that several kids from here had early access to the European PAL Commodore 64 at the end of 1982. " + + "It is more believable that the kids formed these Berlin-based cracking groups a year later, around Christmas and New Year 1983-84, " +
" + // citations - "citations" + + "citations [8]" + ul0 + "The first intro was a picture of the Berlin Bear from the city flag and was released by BCS in 1982. " + "It was a kind of co-production by several people...[9]
A positive moment for me was when the father of a classmate who was working for Apple, was able to through his connections, " + - "obtain a C64 on Christmas 1982... Plutonium Crackers 2001 (PC) was born.[10]
Some of our close friends/posse in Berlin started their C64 scene-careers nearly at the same time. " + "I'm speaking of Cracking Force Berlin (CFB)... and Berlin Cracking Service (BCS).[11]
We were primarily cracking games from 1982 until late 1987.[12]
Copying games wasn't really illegal in most countries back in 1982 or 1983. [13] ... Most early releases weren'tcracked, they were just released or spread.
We were primarily cracking games from 1982 until late 1987.[12]
Copying games wasn't really illegal in most countries back in 1982 or 1983. ... Most early releases weren'tcracked, they were just released or spread.
diskcopy protection. Yet the German manual for the VC-1541 floppy disk drive is dated June 1983, which suggests it didn't sell in Germany until the latter half of 1983. Other early noteworthy titles on the Commodore 64 came on cartridges.
diskcopy protection. The German manual for the VC-1541 floppy disk drive is dated June 1983, which suggests it didn't sell in Germany until the latter half of 1983. Other early noteworthy titles on the Commodore 64 came on cartridges.
I need some file to put the info about the crack in. Hmmm.. Info, NFO!, and that was it." + "
Notes from each file.
" + + "Notes from BUBBLE.NFO
" + "Bubble Bobble by Nova Logic Through Taito" + - "
Broken by Fabulous Furlough
Normal Taito Loader - 5 minutes
Notes from KNIGHTS.NFO
" + "Knights of Legend by Origin Systems", }, { diff --git a/handler/app/template.go b/handler/app/template.go index a520f09a..9c1852f2 100644 --- a/handler/app/template.go +++ b/handler/app/template.go @@ -189,7 +189,7 @@ func LinkRelations(val string) template.HTML { func (web Templ) ImageSample(unid string) template.HTML { ext, name, src := "", "", "" for _, ext = range []string{webp, png} { - name = filepath.Join(web.Environment.PreviewDir, unid+ext) + name = filepath.Join(web.Environment.AbsPreview, unid+ext) src = strings.Join([]string{config.StaticOriginal(), unid + ext}, "/") if helper.Stat(name) { break @@ -218,10 +218,10 @@ func (web Templ) Screenshot(unid, desc string) template.HTML { srcJ := strings.Join([]string{config.StaticOriginal(), unid + jpg}, separator) srcA := strings.Join([]string{config.StaticOriginal(), unid + avif}, separator) - sizeA := helper.Size(filepath.Join(web.Environment.PreviewDir, unid+avif)) - sizeJ := helper.Size(filepath.Join(web.Environment.PreviewDir, unid+jpg)) - sizeP := helper.Size(filepath.Join(web.Environment.PreviewDir, unid+png)) - sizeW := helper.Size(filepath.Join(web.Environment.PreviewDir, unid+webp)) + sizeA := helper.Size(filepath.Join(web.Environment.AbsPreview, unid+avif)) + sizeJ := helper.Size(filepath.Join(web.Environment.AbsPreview, unid+jpg)) + sizeP := helper.Size(filepath.Join(web.Environment.AbsPreview, unid+png)) + sizeW := helper.Size(filepath.Join(web.Environment.AbsPreview, unid+webp)) useLegacyJpg := sizeJ > 0 && sizeJ < sizeA && sizeJ < sizeP && sizeJ < sizeW if useLegacyJpg { @@ -551,8 +551,8 @@ func (web *Templ) Templates() (map[string]*template.Template, error) { // The unid is the filename of the thumbnail image without an extension. // The desc is the description of the image. func (web Templ) Thumb(unid, desc string, bottom bool) template.HTML { - fw := filepath.Join(web.Environment.ThumbnailDir, unid+webp) - fp := filepath.Join(web.Environment.ThumbnailDir, unid+png) + fw := filepath.Join(web.Environment.AbsThumbnail, unid+webp) + fp := filepath.Join(web.Environment.AbsThumbnail, unid+png) webp := strings.Join([]string{config.StaticThumb(), unid + webp}, "/") png := strings.Join([]string{config.StaticThumb(), unid + png}, "/") alt := strings.ToLower(desc) + " thumbnail" @@ -597,7 +597,7 @@ func (web Templ) ThumbSample(unid string) template.HTML { ) ext, name, src := "", "", "" for _, ext = range []string{webp, png} { - name = filepath.Join(web.Environment.ThumbnailDir, unid+ext) + name = filepath.Join(web.Environment.AbsThumbnail, unid+ext) src = strings.Join([]string{config.StaticThumb(), unid + ext}, "/") if helper.Stat(name) { break @@ -624,13 +624,13 @@ func (web Templ) tmpl(name filename) *template.Template { GlobTo("pagination.tmpl"), } config := web.Environment - files = lockTmpls(config.ReadMode, files...) + files = lockTmpls(config.ReadOnly, files...) offline := web.RecordCount < 1 - files = dbTmpls(config.ReadMode, offline, files...) + files = dbTmpls(config.ReadOnly, offline, files...) // append any additional and embedded templates switch name { case "artifact.tmpl": - files = artifactTmpls(config.ReadMode, files...) + files = artifactTmpls(config.ReadOnly, files...) 
case "categories.tmpl": files = append(files, GlobTo("categoriesmore.tmpl")) case "websites.tmpl": diff --git a/handler/handler.go b/handler/handler.go index 90b997ac..7cca28da 100644 --- a/handler/handler.go +++ b/handler/handler.go @@ -20,7 +20,6 @@ import ( "os" "os/signal" "runtime" - "strings" "time" "github.com/Defacto2/server/cmd" @@ -28,7 +27,6 @@ import ( "github.com/Defacto2/server/handler/download" "github.com/Defacto2/server/handler/html3" "github.com/Defacto2/server/handler/htmx" - "github.com/Defacto2/server/handler/middleware/br" "github.com/Defacto2/server/internal/config" "github.com/Defacto2/server/internal/helper" "github.com/labstack/echo/v4" @@ -87,9 +85,6 @@ func (c Configuration) Controller(logger *zap.SugaredLogger) *echo.Echo { middleware.Rewrite(rewrites()), middleware.NonWWWRedirect(), } - if httpsRedirect := configs.HTTPSRedirect && configs.TLSPort > 0; httpsRedirect { - middlewares = append(middlewares, middleware.HTTPSRedirect()) - } e.Pre(middlewares...) // ************************************************* @@ -103,14 +98,11 @@ func (c Configuration) Controller(logger *zap.SugaredLogger) *echo.Echo { c.NoCrawl, middleware.RemoveTrailingSlashWithConfig(configRTS()), } - switch strings.ToLower(configs.Compression) { - case "gzip": + if configs.Compression { middlewares = append(middlewares, middleware.Gzip()) - case "br": - middlewares = append(middlewares, br.Brotli()) } - if configs.ProductionMode { - middlewares = append(middlewares, middleware.Recover()) // recover from panics + if configs.ProdMode { + middlewares = append(middlewares, middleware.Recover()) } e.Use(middlewares...) @@ -118,8 +110,8 @@ func (c Configuration) Controller(logger *zap.SugaredLogger) *echo.Echo { e = MovedPermanently(e) e = htmxGroup(e, logger, - c.Environment.ProductionMode, - c.Environment.DownloadDir) + c.Environment.ProdMode, + c.Environment.AbsDownload) e, err := c.FilesRoutes(e, logger, c.Public) if err != nil { logger.Fatal(err) @@ -152,14 +144,12 @@ func EmbedDirs(e *echo.Echo, currentFs fs.FS) *echo.Echo { } // Info prints the application information to the console. -func (c Configuration) Info(logger *zap.SugaredLogger) { - w := bufio.NewWriter(os.Stdout) +func (c Configuration) Info(logger *zap.SugaredLogger, w io.Writer) { nr := bytes.NewReader(c.Brand) if l, err := io.Copy(w, nr); err != nil { logger.Warnf("Could not print the brand logo: %s.", err) } else if l > 0 { fmt.Fprint(w, "\n\n") - w.Flush() } fmt.Fprintf(w, " %s.\n", cmd.Copyright()) @@ -175,7 +165,6 @@ func (c Configuration) Info(logger *zap.SugaredLogger) { // // All additional feedback should go in internal/config/check.go (c *Config) Checks() // - w.Flush() } // PortErr handles the error when the HTTP or HTTPS server cannot start. 
@@ -186,7 +175,7 @@ func (c Configuration) PortErr(logger *zap.SugaredLogger, port uint, err error) } var portErr *net.OpError switch { - case !c.Environment.ProductionMode && errors.As(err, &portErr): + case !c.Environment.ProdMode && errors.As(err, &portErr): logger.Infof("air or task server could not start (this can probably be ignored): %s.", err) case errors.Is(err, net.ErrClosed), errors.Is(err, http.ErrServerClosed): @@ -231,11 +220,6 @@ func (c *Configuration) ShutdownHTTP(e *echo.Echo, logger *zap.SugaredLogger) { waitDuration := ShutdownWait waitCount := ShutdownCounter ticker := 1 * time.Second - if c.Environment.LocalMode { - waitDuration = 0 - waitCount = 0 - ticker = 1 * time.Millisecond // this cannot be zero - } ctx, cancel := context.WithTimeout(context.Background(), waitDuration) defer func() { const alert = "Detected Ctrl + C, server will shutdown" @@ -308,38 +292,49 @@ func (c *Configuration) StartHTTP(e *echo.Echo, logger *zap.SugaredLogger) { panic(ErrRoutes) } port := c.Environment.HTTPPort - if port == 0 { + address := c.address(port) + if address == "" { return } - address := fmt.Sprintf(":%d", port) if err := e.Start(address); err != nil { c.PortErr(logger, port, err) } } +func (c *Configuration) address(port uint) string { + if port == 0 { + return "" + } + address := fmt.Sprintf(":%d", port) + if c.Environment.MatchHost != "" { + address = fmt.Sprintf("%s:%d", c.Environment.MatchHost, port) + } + return address +} + // StartTLS starts the encrypted TLS web server. func (c *Configuration) StartTLS(e *echo.Echo, logger *zap.SugaredLogger) { if e == nil { panic(ErrRoutes) } port := c.Environment.TLSPort - if port == 0 { + address := c.address(port) + if address == "" { return } - cert := c.Environment.TLSCert - key := c.Environment.TLSKey + certFile := c.Environment.TLSCert + keyFile := c.Environment.TLSKey const failure = "Could not start the TLS server" - if cert == "" || key == "" { + if certFile == "" || keyFile == "" { logger.Fatalf("%s, missing certificate or key file.", failure) } - if !helper.File(cert) { - logger.Fatalf("%s, certificate file does not exist: %s.", failure, cert) + if !helper.File(certFile) { + logger.Fatalf("%s, certificate file does not exist: %s.", failure, certFile) } - if !helper.File(key) { - logger.Fatalf("%s, key file does not exist: %s.", failure, key) + if !helper.File(keyFile) { + logger.Fatalf("%s, key file does not exist: %s.", failure, keyFile) } - address := fmt.Sprintf(":%d", port) - if err := e.StartTLS(address, "", ""); err != nil { + if err := e.StartTLS(address, certFile, keyFile); err != nil { c.PortErr(logger, port, err) } } @@ -351,29 +346,21 @@ func (c *Configuration) StartTLSLocal(e *echo.Echo, logger *zap.SugaredLogger) { panic(ErrRoutes) } port := c.Environment.TLSPort - if port == 0 { + address := c.address(port) + if address == "" { return } const cert, key = "public/certs/cert.pem", "public/certs/key.pem" const failure = "Could not read the internal localhost" - cpem, err := c.Public.ReadFile(cert) + certB, err := c.Public.ReadFile(cert) if err != nil { logger.Fatalf("%s, TLS certificate: %s.", failure, err) } - kpem, err := c.Public.ReadFile(key) + keyB, err := c.Public.ReadFile(key) if err != nil { logger.Fatalf("%s, TLS key: %s.", failure, err) } - lock := strings.TrimSpace(c.Environment.TLSHost) - var address string - const showAllConnections = "" - switch lock { - case showAllConnections: - address = fmt.Sprintf(":%d", port) - default: - address = fmt.Sprintf("%s:%d", lock, port) - } - if err := 
e.StartTLS(address, cpem, kpem); err != nil { + if err := e.StartTLS(address, certB, keyB); err != nil { c.PortErr(logger, port, err) } } @@ -382,7 +369,7 @@ func (c *Configuration) StartTLSLocal(e *echo.Echo, logger *zap.SugaredLogger) { func (c Configuration) downloader(cx echo.Context, logger *zap.SugaredLogger) error { d := download.Download{ Inline: false, - Path: c.Environment.DownloadDir, + Path: c.Environment.AbsDownload, } if err := d.HTTPSend(cx, logger); err != nil { return fmt.Errorf("d.HTTPSend: %w", err) diff --git a/handler/handler_test.go b/handler/handler_test.go index 09e2c344..bf1f9610 100644 --- a/handler/handler_test.go +++ b/handler/handler_test.go @@ -14,7 +14,7 @@ import ( func TestRegister(t *testing.T) { t.Parallel() c := handler.Configuration{} - logger := zaplog.CLI().Sugar() + logger := zaplog.Status().Sugar() tr, err := c.Registry(logger) assert.Nil(t, tr) require.Error(t, err) diff --git a/handler/htmx/transfer.go b/handler/htmx/transfer.go index bb56f1c3..9bf2f9c1 100644 --- a/handler/htmx/transfer.go +++ b/handler/htmx/transfer.go @@ -460,9 +460,9 @@ func submit(c echo.Context, logger *zap.SugaredLogger, prod string) error { var key int64 switch prod { case dz: - key, err = model.InsertDemozoo(ctx, db, int64(id)) + key, err = model.InsertDemozoo(ctx, db, id) case pt: - key, err = model.InsertPouet(ctx, db, int64(id)) + key, err = model.InsertPouet(ctx, db, id) } if err != nil || key == 0 { logger.Error(err, id) diff --git a/handler/middleware.go b/handler/middleware.go index 04d87876..2c0fc915 100644 --- a/handler/middleware.go +++ b/handler/middleware.go @@ -30,7 +30,7 @@ func (c Configuration) NoCrawl(next echo.HandlerFunc) echo.HandlerFunc { } return func(e echo.Context) error { const HeaderXRobotsTag = "X-Robots-Tag" - e.Response().Header().Set(HeaderXRobotsTag, "noindex, nofollow") + e.Response().Header().Set(HeaderXRobotsTag, "none") return next(e) } } @@ -39,9 +39,9 @@ func (c Configuration) NoCrawl(next echo.HandlerFunc) echo.HandlerFunc { // of the database and any related user interface. func (c Configuration) ReadOnlyLock(next echo.HandlerFunc) echo.HandlerFunc { return func(e echo.Context) error { - s := strconv.FormatBool(c.Environment.ReadMode) + s := strconv.FormatBool(c.Environment.ReadOnly) e.Response().Header().Set("X-Read-Only-Lock", s) - if c.Environment.ReadMode { + if c.Environment.ReadOnly { if err := app.StatusErr(e, http.StatusForbidden, ""); err != nil { return fmt.Errorf("app.StatusErr: %w", err) } @@ -94,17 +94,17 @@ func configRTS() middleware.TrailingSlashConfig { // based on the application configuration. The logger is set to the CLI // logger for development mode and the Production logger for production mode. 
func (c Configuration) configZapLogger() middleware.RequestLoggerConfig { - if !c.Environment.LogRequests { + if !c.Environment.LogAll { return middleware.RequestLoggerConfig{ LogValuesFunc: func(_ echo.Context, _ middleware.RequestLoggerValues) error { return nil }, } } - logger := zaplog.CLI().Sugar() - if c.Environment.ProductionMode { - root := c.Environment.LogDir - logger = zaplog.Production(root).Sugar() + logger := zaplog.Status().Sugar() + if c.Environment.ProdMode { + root := c.Environment.AbsLog + logger = zaplog.Store(root).Sugar() } defer func() { _ = logger.Sync() diff --git a/handler/middleware/br/br.go b/handler/middleware/br/br.go deleted file mode 100644 index 82b844a6..00000000 --- a/handler/middleware/br/br.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package br provides experimental brotli support for the Echo web framework. -package br - -import ( - "bufio" - "fmt" - "io" - "net" - "net/http" - "strings" - - "github.com/andybalholm/brotli" - "github.com/labstack/echo/v4" - "github.com/labstack/echo/v4/middleware" -) - -// -// Brotli compression scheme, copied from the submission the delaneyj commented on 22 Feb 2019. -// https://github.com/labstack/echo/blob/a327810ef8a5625797ca6a106b538e5abec3917e/middleware/compress_brotli.go -// - -const ( - BrotliScheme = "br" // Brotli compression header scheme. -) - -var ErrHijack = echo.NewHTTPError(http.StatusInternalServerError, "response could not be hijacked") - -// Brotli returns a middleware which compresses HTTP response using brotli compression -// scheme. -func Brotli() echo.MiddlewareFunc { - return BrotliWithConfig(DefaultBrotliConfig()) -} - -// BrotliWithConfig returns the [Brotli] middleware with config. -func BrotliWithConfig(config BrotliConfig) echo.MiddlewareFunc { - if config.Skipper == nil { - config.Skipper = DefaultBrotliConfig().Skipper - } - if config.Level == 0 { - config.Level = DefaultBrotliConfig().Level - } - - return func(next echo.HandlerFunc) echo.HandlerFunc { - return func(c echo.Context) error { - if config.Skipper(c) { - return next(c) - } - - resp := c.Response() - resp.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding) - if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), BrotliScheme) { - resp.Header().Set(echo.HeaderContentEncoding, BrotliScheme) // Issue #806 - rw := resp.Writer - w := brotli.NewWriterOptions(rw, brotli.WriterOptions{Quality: config.Level}) - defer func() { - if resp.Size == 0 { - if resp.Header().Get(echo.HeaderContentEncoding) == BrotliScheme { - resp.Header().Del(echo.HeaderContentEncoding) - } - // We have to reset response to it's pristine state when - // nothing is written to body or error is returned. - // See issue #424, #407. - resp.Writer = rw - w.Reset(io.Discard) - } - w.Close() - }() - grw := &brotliResponseWriter{Writer: w, ResponseWriter: rw} - resp.Writer = grw - } - return next(c) - } - } -} - -// DefaultBrotliConfig is the default Brotli middleware config. -func DefaultBrotliConfig() BrotliConfig { - return BrotliConfig{ - Skipper: middleware.DefaultSkipper, - Level: brotli.DefaultCompression, - } -} - -type ( - // BrotliConfig defines the config for Brotli middleware. - BrotliConfig struct { - // Skipper defines a function to skip middleware. - Skipper middleware.Skipper - - // Brotli compression level. - // Optional. Default value -1. 
- Level int `yaml:"level"` - } - - brotliResponseWriter struct { - io.Writer - http.ResponseWriter - } -) - -func (w *brotliResponseWriter) WriteHeader(code int) { - if code == http.StatusNoContent { // Issue #489 - w.ResponseWriter.Header().Del(echo.HeaderContentEncoding) - } - w.Header().Del(echo.HeaderContentLength) // Issue #444 - w.ResponseWriter.WriteHeader(code) -} - -func (w *brotliResponseWriter) Write(b []byte) (int, error) { - if w.Header().Get(echo.HeaderContentType) == "" { - w.Header().Set(echo.HeaderContentType, http.DetectContentType(b)) - } - i, err := w.Writer.Write(b) - if err != nil { - return 0, fmt.Errorf("brotli.Writer.Write: %w", err) - } - return i, nil -} - -func (w *brotliResponseWriter) Flush() { - if writer, writerExists := w.Writer.(*brotli.Writer); writerExists { - writer.Flush() - } - if flusher, flusherExists := w.ResponseWriter.(http.Flusher); flusherExists { - flusher.Flush() - } -} - -func (w *brotliResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hijacker, hijackerExists := w.ResponseWriter.(http.Hijacker); hijackerExists { - return hijacker.Hijack() //nolint:wrapcheck - } - return nil, nil, ErrHijack -} diff --git a/handler/middleware/br/br_test.go b/handler/middleware/br/br_test.go deleted file mode 100644 index 0358384e..00000000 --- a/handler/middleware/br/br_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package br_test - -import ( - "bytes" - "io" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/Defacto2/server/handler/middleware/br" - "github.com/andybalholm/brotli" - "github.com/labstack/echo/v4" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestBrotli(t *testing.T) { - e := echo.New() - req := httptest.NewRequest(http.MethodGet, "/", nil) - rec := httptest.NewRecorder() - c := e.NewContext(req, rec) - // Skip if no Accept-Encoding header - h := br.Brotli()(func(c echo.Context) error { - _, _ = c.Response().Write([]byte("test")) // For Content-Type sniffing - return nil - }) - err := h(c) - require.NoError(t, err) - assert := assert.New(t) - assert.Equal("test", rec.Body.String()) - // Brotli - req = httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, br.BrotliScheme) - rec = httptest.NewRecorder() - c = e.NewContext(req, rec) - err = h(c) - require.NoError(t, err) - assert.Equal(br.BrotliScheme, rec.Header().Get(echo.HeaderContentEncoding)) - assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain) - r := brotli.NewReader(rec.Body) - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(r) - require.NoError(t, err) - assert.Equal("test", buf.String()) - chunkBuf := make([]byte, 5) - // Brotli chunked - req = httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, br.BrotliScheme) - rec = httptest.NewRecorder() - c = e.NewContext(req, rec) - err = br.Brotli()(func(c echo.Context) error { - c.Response().Header().Set("Content-Type", "text/event-stream") - c.Response().Header().Set("Transfer-Encoding", "chunked") - // Write and flush the first part of the data - _, err = c.Response().Write([]byte("test\n")) - require.NoError(t, err) - c.Response().Flush() - // Read the first part of the data - assert.True(rec.Flushed) - assert.Equal(br.BrotliScheme, rec.Header().Get(echo.HeaderContentEncoding)) - err := r.Reset(rec.Body) - require.NoError(t, err) - _, err = io.ReadFull(r, chunkBuf) - require.NoError(t, err) - assert.Equal("test\n", string(chunkBuf)) - // Write and flush the second 
part of the data - _, err = c.Response().Write([]byte("test\n")) - require.NoError(t, err) - c.Response().Flush() - _, err = io.ReadFull(r, chunkBuf) - require.NoError(t, err) - assert.Equal("test\n", string(chunkBuf)) - // Write the final part of the data and return - _, err = c.Response().Write([]byte("test")) - require.NoError(t, err) - return nil - })(c) - require.NoError(t, err) - buf = new(bytes.Buffer) - _, err = buf.ReadFrom(r) - require.NoError(t, err) - assert.Equal("test", buf.String()) -} - -func TestBrotliNoContent(t *testing.T) { - e := echo.New() - req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, br.BrotliScheme) - rec := httptest.NewRecorder() - c := e.NewContext(req, rec) - h := br.Brotli()(func(c echo.Context) error { - return c.NoContent(http.StatusNoContent) - }) - if assert.NoError(t, h(c)) { - assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding)) - assert.Empty(t, rec.Header().Get(echo.HeaderContentType)) - assert.Empty(t, len(rec.Body.Bytes())) - } -} - -func TestBrotliErrorReturned(t *testing.T) { - e := echo.New() - e.Use(br.Brotli()) - e.GET("/", func(_ echo.Context) error { - return echo.ErrNotFound - }) - req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, br.BrotliScheme) - rec := httptest.NewRecorder() - e.ServeHTTP(rec, req) - assert.Equal(t, http.StatusNotFound, rec.Code) - assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding)) -} - -// Issue #806. -func TestBrotliWithStatic(t *testing.T) { - e := echo.New() - e.Use(br.Brotli()) - e.Static("/test", "../../../public/image/layout") - req := httptest.NewRequest(http.MethodGet, "/test/favicon-152x152.png", nil) - req.Header.Set(echo.HeaderAcceptEncoding, br.BrotliScheme) - rec := httptest.NewRecorder() - e.ServeHTTP(rec, req) - assert.Equal(t, http.StatusOK, rec.Code) - // Data is written out in chunks when Content-Length == "", so only - // validate the content length if it's not set. 
- if cl := rec.Header().Get("Content-Length"); cl != "" { - assert.Equal(t, cl, rec.Body.Len()) - } - r := brotli.NewReader(rec.Body) - - want, err := os.ReadFile("../../../public/image/layout/favicon-152x152.png") - if assert.NoError(t, err) { - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(r) - require.NoError(t, err) - assert.EqualValues(t, want, buf.Bytes()) - } -} diff --git a/handler/router.go b/handler/router.go index 1b9c503a..8b191084 100644 --- a/handler/router.go +++ b/handler/router.go @@ -35,9 +35,9 @@ func (c Configuration) FilesRoutes(e *echo.Echo, logger *zap.SugaredLogger, publ app.Caching.Records(c.RecordCount) dir := app.Dirs{ - Download: c.Environment.DownloadDir, - Preview: c.Environment.PreviewDir, - Thumbnail: c.Environment.ThumbnailDir, + Download: c.Environment.AbsDownload, + Preview: c.Environment.AbsPreview, + Thumbnail: c.Environment.AbsThumbnail, } nonce, err := c.nonce(e) @@ -64,7 +64,7 @@ func (c Configuration) nonce(e *echo.Echo) (string, error) { if e == nil { panic(ErrRoutes) } - if c.Environment.ReadMode { + if c.Environment.ReadOnly { return "", nil } b, err := helper.CookieStore(c.Environment.SessionKey) @@ -124,8 +124,8 @@ func (c Configuration) static(e *echo.Echo) *echo.Echo { if e == nil { panic(ErrRoutes) } - e.Static(config.StaticThumb(), c.Environment.ThumbnailDir) - e.Static(config.StaticOriginal(), c.Environment.PreviewDir) + e.Static(config.StaticThumb(), c.Environment.AbsThumbnail) + e.Static(config.StaticOriginal(), c.Environment.AbsPreview) return e } @@ -146,7 +146,7 @@ func (c Configuration) debugInfo(e *echo.Echo) *echo.Echo { if e == nil { panic(ErrRoutes) } - if c.Environment.ProductionMode { + if c.Environment.ProdMode { return e } @@ -190,6 +190,9 @@ func (c Configuration) website(e *echo.Echo, logger *zap.SugaredLogger, dir app. if e == nil { panic(ErrRoutes) } + e.GET("/health-check", func(c echo.Context) error { + return c.NoContent(http.StatusOK) + }) s := e.Group("") s.GET("/", app.Index) s.GET("/artist", app.Artist) @@ -198,11 +201,11 @@ func (c Configuration) website(e *echo.Echo, logger *zap.SugaredLogger, dir app. s.GET("/bbs/year", app.BBSYear) s.GET("/coder", app.Coder) s.GET(Downloader, func(cx echo.Context) error { - return app.Download(cx, logger, c.Environment.DownloadDir) + return app.Download(cx, logger, c.Environment.AbsDownload) }) s.GET("/f/:id", func(cx echo.Context) error { dir.URI = cx.Param("id") - return dir.Artifact(cx, logger, c.Environment.ReadMode) + return dir.Artifact(cx, logger, c.Environment.ReadOnly) }) s.GET("/file/stats", func(cx echo.Context) error { return app.Categories(cx, logger, true) @@ -263,7 +266,7 @@ func (c Configuration) website(e *echo.Echo, logger *zap.SugaredLogger, dir app. 
}) s.GET("/writer", app.Writer) s.GET("/v/:id", func(cx echo.Context) error { - return app.Inline(cx, logger, c.Environment.DownloadDir) + return app.Inline(cx, logger, c.Environment.AbsDownload) }) return e } @@ -335,8 +338,6 @@ func MovedPermanently(e *echo.Echo) *echo.Echo { panic(ErrRoutes) } e = nginx(e) - e = retired(e) - e = wayback(e) e = fixes(e) return e } @@ -347,8 +348,8 @@ func nginx(e *echo.Echo) *echo.Echo { panic(ErrRoutes) } nginx := e.Group("") - nginx.GET("/welcome", func(c echo.Context) error { - return c.Redirect(code, "/") + nginx.GET("/file/detail/:id", func(c echo.Context) error { + return c.Redirect(code, "/f/"+c.Param("id")) }) nginx.GET("/file/download/:id", func(c echo.Context) error { return c.Redirect(code, "/d/"+c.Param("id")) @@ -356,221 +357,12 @@ func nginx(e *echo.Echo) *echo.Echo { nginx.GET("/file/view/:id", func(c echo.Context) error { return c.Redirect(code, "/v/"+c.Param("id")) }) - nginx.GET("/apollo-x/fc.htm", func(c echo.Context) error { - return c.Redirect(code, "/wayback/apollo-x-demo-resources-1999-december-17/fc.htm") - }) - nginx.GET("/bbs.cfm", func(c echo.Context) error { - return c.Redirect(code, "/bbs") - }) - nginx.GET("/contact.cfm", func(c echo.Context) error { - return c.Redirect(code, "/") // there's no dedicated contact page - }) - nginx.GET("/cracktros.cfm", func(c echo.Context) error { - return c.Redirect(code, "/files/intro") - }) - nginx.GET("/cracktros-detail.cfm:/:id", func(c echo.Context) error { - return c.Redirect(code, "/f/"+c.Param("id")) - }) - nginx.GET("/documents.cfm", func(c echo.Context) error { - return c.Redirect(code, "/files/text") - }) - nginx.GET("/index.cfm", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - nginx.GET("/index.cfm/:uri", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - nginx.GET("/index.cfml/:uri", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - nginx.GET("/groups.cfm", func(c echo.Context) error { - return c.Redirect(code, "/releaser") - }) - nginx.GET("/magazines.cfm", func(c echo.Context) error { - return c.Redirect(code, "/magazine") - }) - nginx.GET("/nfo-files.cfm", func(c echo.Context) error { - return c.Redirect(code, "/files/nfo") - }) - nginx.GET("/portal.cfm", func(c echo.Context) error { - return c.Redirect(code, "/website") - }) - nginx.GET("/rewrite.cfm", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - nginx.GET("/site-info.cfm", func(c echo.Context) error { - return c.Redirect(code, "/") // there's no dedicated about site page - }) - return e -} - -// retired, redirects from the 2020 edition of the website. 
-func retired(e *echo.Echo) *echo.Echo { - if e == nil { - panic(ErrRoutes) - } - retired := e.Group("") - retired.GET("/code", func(c echo.Context) error { - return c.Redirect(code, "https://github.com/Defacto2/server") - }) - retired.GET("/commercial", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - retired.GET("/defacto", func(c echo.Context) error { - return c.Redirect(code, "/history") - }) - retired.GET("/defacto2/donate", func(c echo.Context) error { - return c.Redirect(code, "/thanks") - }) - retired.GET("/defacto2/history", func(c echo.Context) error { - return c.Redirect(code, "/history") - }) - retired.GET("/defacto2/subculture", func(c echo.Context) error { - return c.Redirect(code, "/thescene") - }) - retired.GET("/file/detail/:id", func(c echo.Context) error { + nginx.GET("/cracktros-detail.cfm/:id", func(c echo.Context) error { return c.Redirect(code, "/f/"+c.Param("id")) }) - retired.GET("/file/list/waitingapproval", func(c echo.Context) error { - return c.Redirect(code, "/files/for-approval") - }) - retired.GET("/file/index", func(c echo.Context) error { - return c.Redirect(code, "/file") - }) - retired.GET("/file/list/:uri", func(c echo.Context) error { - return c.Redirect(code, "/files/new-uploads") - }) - retired.GET("/files/json/site.webmanifest", func(c echo.Context) error { - return c.Redirect(code, "/site.webmanifest") - }) - retired.GET("/help/cc", func(c echo.Context) error { - return c.Redirect(code, "/") // there's no dedicated contact page - }) - retired.GET("/help/privacy", func(c echo.Context) error { - return c.Redirect(code, "/") // there's no dedicated privacy page - }) - retired.GET("/help/viruses", func(c echo.Context) error { - return c.Redirect(code, "/") // there's no dedicated virus page - }) - retired.GET("/home", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - retired.GET("/link/list", func(c echo.Context) error { - return c.Redirect(code, "/website") - }) - retired.GET("/link/list/:id", func(c echo.Context) error { - return c.Redirect(code, "/website") - }) - e = retiredOrg(e) - e = retiredPerson(e) - e = retiredUpload(e) - return e -} - -func retiredOrg(e *echo.Echo) *echo.Echo { - if e == nil { - panic(ErrRoutes) - } - org := e.Group("/organisation/list") //nolint:misspell - org.GET("", func(c echo.Context) error { - return c.Redirect(code, "/releaser") - }) - org.GET("/bbs", func(c echo.Context) error { - return c.Redirect(code, "/bbs") - }) - org.GET("/group", func(c echo.Context) error { - return c.Redirect(code, "/releaser") - }) - org.GET("/ftp", func(c echo.Context) error { - return c.Redirect(code, "/ftp") - }) - org.GET("/magazine", func(c echo.Context) error { - return c.Redirect(code, "/magazine") - }) - return e -} - -func retiredPerson(e *echo.Echo) *echo.Echo { - if e == nil { - panic(ErrRoutes) - } - person := e.Group("/person/list") - person.GET("/person/list", func(c echo.Context) error { - return c.Redirect(code, "/scener") - }) - person.GET("/artists", func(c echo.Context) error { - return c.Redirect(code, "/artist") - }) - person.GET("/coders", func(c echo.Context) error { - return c.Redirect(code, "/coder") - }) - person.GET("/musicians", func(c echo.Context) error { - return c.Redirect(code, "/musician") - }) - person.GET("/writers", func(c echo.Context) error { - return c.Redirect(code, "/writer") - }) - return e -} - -func retiredUpload(e *echo.Echo) *echo.Echo { - if e == nil { - panic(ErrRoutes) - } - upload := e.Group("/upload") - upload.GET("", func(c echo.Context) error { - return 
c.Redirect(code, "/") - }) - upload.GET("/file", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/external", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/intro", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/site", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/document", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/magazine", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/art", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - upload.GET("/other", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - return e -} - -// wayback redirects. -func wayback(e *echo.Echo) *echo.Echo { - if e == nil { - panic(ErrRoutes) - } - wayback := e.Group("") - wayback.GET("/scene-archive/:uri", func(c echo.Context) error { - return c.Redirect(code, "/") - }) - wayback.GET("/includes/documentsweb/df2web99/scene-archive/history.html", func(c echo.Context) error { - return c.Redirect(code, "/wayback/defacto2-from-1999-september-26/scene-archive/history.html") - }) - wayback.GET("/includes/documentsweb/tKC_history.html", func(c echo.Context) error { - return c.Redirect(code, "/wayback/the-life-and-legend-of-tkc-2000-october-10/index.html") - }) - wayback.GET("/legacy/apollo-x/:uri", func(c echo.Context) error { - return c.Redirect(code, "/wayback/apollo-x-demo-resources-1999-december-17/:uri") - }) - wayback.GET("/web/20120827022026/http:/www.defacto2.net:80/file/list/nfotool", func(c echo.Context) error { - return c.Redirect(code, "/files/nfo-tool") - }) - wayback.GET("/web.pages/warez_world-1.htm", func(c echo.Context) error { - return c.Redirect(code, "/wayback/warez-world-from-2001-july-26/index.html") + nginx.GET("/wayback/:url", func(c echo.Context) error { + // todo: Test this redirect. + return c.Redirect(code, "https://wayback.defacto2.net/"+c.Param("url")) }) return e } diff --git a/.air.toml b/init/.air.toml similarity index 96% rename from .air.toml rename to init/.air.toml index 21a3cc1e..1a26b8b8 100644 --- a/.air.toml +++ b/init/.air.toml @@ -22,7 +22,7 @@ full_bin = "REQUESTS=true ./tmp/main" # Watch these filename extensions. include_ext = ["go", "tpl", "tmpl", "html", "css", "js"] # Ignore these filename extensions or directories. -exclude_dir = ["dist/", "tmp/", "vendor/", ".git/"] +exclude_dir = [".git/", "dist/", "docs", "init/", "tmp/", "vendor/"] # Watch these directories if you specified. include_dir = [] # Watch these files. diff --git a/.golangci.yaml b/init/.golangci.yaml similarity index 100% rename from .golangci.yaml rename to init/.golangci.yaml diff --git a/.goreleaser-release.yaml b/init/.goreleaser-release.yaml similarity index 100% rename from .goreleaser-release.yaml rename to init/.goreleaser-release.yaml diff --git a/.goreleaser.yaml b/init/.goreleaser.yaml similarity index 98% rename from .goreleaser.yaml rename to init/.goreleaser.yaml index 3d645d15..a6a75c47 100644 --- a/.goreleaser.yaml +++ b/init/.goreleaser.yaml @@ -19,6 +19,7 @@ before: - go mod tidy builds: - id: server + main: server.go env: - CGO_ENABLED=0 targets: diff --git a/.sqlboiler.toml b/init/.sqlboiler.toml similarity index 78% rename from .sqlboiler.toml rename to init/.sqlboiler.toml index 354ba5e6..a1a0c89b 100644 --- a/.sqlboiler.toml +++ b/init/.sqlboiler.toml @@ -48,10 +48,17 @@ wipe = true # PostgreSQL database connection settings and defaults. 
# https://github.com/volatiletech/sqlboiler?tab=readme-ov-file#database-driver-configuration +# +# These values are used by the Go generate command to connect to the PostgreSQL database. +# It is only required when the database schema has changed and the Go models need to be regenerated. +# Production deployment SECRETS MUST NOT be stored in this file. +# +# example connection URL: postgres://pglogrepl:secret@127.0.0.1/pglogrepl?replication=database +# [psql] schema = "public" -dbname = "defacto2-ps" +dbname = "defacto2_ps" host = "localhost" port = 5432 user = "root" diff --git a/init/defacto2.service b/init/defacto2.service new file mode 100644 index 00000000..5d3dc6b1 --- /dev/null +++ b/init/defacto2.service @@ -0,0 +1,41 @@ +# /etc/systemd/system/defacto2.service +# +# For using Defacto2 with a config file. +# +# systemctl daemon-reload +# systemctl enable defacto2.service +# systemctl start defacto2.service +# systemctl status defacto2.service +# journalctl -e -u defacto2.service +# +# About these commands, see: +# https://www.freedesktop.org/software/systemd/man/latest/systemd.directives.html + + +[Unit] +Description=Defacto2 +Documentation=https://github.com/Defacto2/server +After=network.target network-online.target +Requires=network-online.target + +[Service] +Environment="D2_MATCH_HOST=localhost" +Environment="D2_DATABASE_URL=postgres://root:example@localhost:5432/defacto2_ps" +Environment="D2_DIR_DOWNLOAD=/mnt/volume_sfo3_01/assets/downloads" +Environment="D2_DIR_PREVIEW=/mnt/volume_sfo3_01/assets/images000" +Environment="D2_DIR_THUMBNAIL=/mnt/volume_sfo3_01/assets/images400" +Environment="D2_DIR_LOG=/var/log/caddy" +Environment="D2_PROD_MODE=true" "D2_READ_ONLY=false" "D2_NO_CRAWL=true" +User=caddy +Group=caddy +ExecStart=/usr/bin/df2-server +ProtectHome=true +ProtectSystem=full +PrivateTmp=yes +TimeoutStopSec=15s + +[Install] +WantedBy=multi-user.target + +#ExecStart=/usr/bin/caddy run --environ --config /etc/caddy/Caddyfile +#ExecReload=/usr/bin/caddy reload --config /etc/caddy/Caddyfile --force diff --git a/example.env.local b/init/example.env.local similarity index 73% rename from example.env.local rename to init/example.env.local index 5ba9d57e..ca2d57ce 100644 --- a/example.env.local +++ b/init/example.env.local @@ -18,42 +18,21 @@ # ============================================================================== # List the directory path that holds the named UUID files for the artifact downloads. -D2_DOWNLOAD_DIR= +D2_DIR_DOWNLOAD= # List the directory path that holds the named UUID files for the artifact images. -D2_PREVIEW_DIR= +D2_DIR_PREVIEW= # List the directory path that holds the named UUID files for the artifact thumbnails. -D2_THUMBNAIL_DIR= +D2_DIR_THUMBNAIL= # ============================================================================== # These are the PostgreSQL database connection settings. # The database is required for accessing and displaying the artifact data. # ============================================================================== -# Provide the name of the server host to which to connect. -# The default value is localhost, and you may use host.docker.internal for a -# Docker container, but this can usually be left unchanged. -#PS_HOST_NAME=localhost - -# Port number the Postgres database server is listening on. -# The default value is 5432. -#HOST_PORT=5432 - -# Connect to the database using an insecure, plain text connection. -# The default value is true. -NO_SSL=true - -# Provide the name of the database to which to connect. 
-# The default value is defacto2-ps, but this can usually be left unchanged. -#PS_DATABASE=defacto2-ps - -# Provide a username of a database account used to connect. -# This account must have read and write access to the PS_DATABASE. -PS_USERNAME=root - -# Provide a password for the database account used to connect. -PS_PASSWORD=example +# The connection string to the PostgreSQL database. +#D2_DATABASE_URL=postgres://root:example@localhost:5432/defacto2_ps # ============================================================================== # These are the web application and server settings. @@ -62,21 +41,29 @@ PS_PASSWORD=example # The HTTP unencrypted port number that the web server will listen on. # It is recommended to use a port number greater than 1024. # The default port number is 1323, while the common HTTP port number is 80. -D2_HTTP_PORT=1323 +#D2_HTTP_PORT=1323 -# Enable either "gzip" or "br" compression of HTTP/HTTPS responses; -# you may turn this off if you are using a reverse proxy. -# The default value is gzip, or use "disabled" to turn off compression. -#D2_COMPRESSION="gzip" +# Only listen to HTTP/HTTPS requests from a specific host, domain name or IP address. +# Leave it blank to permit connections from any host. +#D2_MATCH_HOST= + +# Production mode changes the logging output to files and enables the recovery +# from software crashes and panics. The server will log significant errors to a +# file. +# When production mode is disabled, the server will also skip the database +# connection and file checks on startup to speed up the initialization. +# The default value is false. +#D2_PROD_MODE=false # Always tell search engines not to crawl any of the website pages or assets. # This setting is useful for preventing search engines from indexing the website. # The default value is false. #D2_NO_CRAWL=false -# Skip the database connection and file checks on server startup to speed up -# the initialization. -#D2_FAST_START=false +# Enable gzip compression of the HTTP/HTTPS responses. +# You may turn this off if you are using a reverse proxy. +# The default value is false. +#D2_COMPRESSION="false" # Use the read-only mode to turn off all POST, PUT, and DELETE requests # and any related user interface such as the editor mode and the uploader. @@ -108,33 +95,18 @@ D2_READ_ONLY=true # self-signed, localhost key. #D2_TLS_KEY= -# An advised setting limits TLS to the specific host or domain name; -# leave it blank to permit TLS connections from any host. -#D2_TLS_HOST= - -# The web server will redirect all unencrypted HTTP requests to encrypted HTTPS. -# This setting is useful for enforcing secure connections but will break the -# server's functionality if HTTPS is not correctly configured. -#D2_HTTPS_REDIRECT=false - # ============================================================================== # Logger settings. # ============================================================================== -# Production mode changes the logging output to files and enables the recovery -# from software crashes and panics. The server will log significan errors to a -# file. -# The default value is false. -#D2_PRODUCTION_MODE=false - -# Log all HTTP/HTTPS client requests to a file except those with 200 OK responses. +# Log all HTTP/HTTPS client requests to the terminal (stdout). # The default value is false. -#D2_LOG_REQUESTS=false +#D2_LOG_ALL=false # The absolute directory path will store all logs generated by this application. 
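Editor's note: the renamed variables above (D2_DIR_DOWNLOAD, D2_DIR_PREVIEW, D2_DIR_THUMBNAIL, D2_DATABASE_URL, D2_PROD_MODE and friends) are consumed through the caarlos0/env struct tags that appear later in internal/config. The minimal sketch below mirrors a small subset of that struct to show how a process started by the defacto2.service unit or an .env.local file ends up populating the configuration; the field names here are illustrative, the real struct lives in internal/config.

package main

import (
	"fmt"
	"log"

	"github.com/caarlos0/env/v10"
)

// cfg is a cut-down stand-in for config.Config using the renamed D2_* variables.
type cfg struct {
	DatabaseURL string `env:"D2_DATABASE_URL"`
	AbsDownload string `env:"D2_DIR_DOWNLOAD"`
	AbsLog      string `env:"D2_DIR_LOG"`
	ProdMode    bool   `env:"D2_PROD_MODE"`
	ReadOnly    bool   `env:"D2_READ_ONLY"`
}

func main() {
	c := cfg{}
	if err := env.Parse(&c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}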
# If the directory is invalid, the application will attempt to use a # configuration directory in the user's home. -#D2_LOG_DIR= +#D2_DIR_LOG= # ============================================================================== # The Google OAuth2 settings are used for the editor mode to enable select diff --git a/example.server.sh b/init/example.server.sh similarity index 100% rename from example.server.sh rename to init/example.server.sh diff --git a/internal/config/check.go b/internal/config/check.go index 2f9f5763..bfa5b3b4 100644 --- a/internal/config/check.go +++ b/internal/config/check.go @@ -9,31 +9,25 @@ import ( "path/filepath" "github.com/Defacto2/server/internal/helper" - "github.com/Defacto2/server/internal/postgres" "go.uber.org/zap" ) const ( PortMax = 65534 // PortMax is the highest valid port number. PortSys = 1024 // PortSys is the lowest valid port number that does not require system access. - - toFewFiles = 10 // toFewFiles is the minimum number of files required in a directory. ) var ( - ErrPortMax = fmt.Errorf("http port value must be between 1-%d", PortMax) - ErrPortSys = fmt.Errorf("http port values between 1-%d require system access", PortSys) - ErrDir = errors.New("the directory path is not set") - ErrDir404 = errors.New("the directory path does not exist") - ErrDirIs = errors.New("the directory path points to the file") - ErrDirRead = errors.New("the directory path could not be read") - ErrDirFew = errors.New("the directory path contains only a few items") - ErrUnencrypted = errors.New("the production server is configured to use unencrypted HTTP connections") - ErrNoOAuth2 = errors.New("the production server requires a google, oauth2 client id to allow admin logins") - ErrNoAccounts = errors.New("the production server has no google oauth2 user accounts to allow admin logins") - ErrSessionKey = errors.New("the production server has a session, " + - "encryption key set instead of using a randomized key") - ErrZap = errors.New("the zap logger instance is nil") + ErrPortMax = fmt.Errorf("http port value must be between 1-%d", PortMax) + ErrPortSys = fmt.Errorf("http port values between 1-%d require system access", PortSys) + ErrDir = errors.New("the directory path is not set") + ErrDir404 = errors.New("the directory path does not exist") + ErrDirIs = errors.New("the directory path points to the file") + ErrDirRead = errors.New("the directory path could not be read") + ErrDirFew = errors.New("the directory path contains only a few items") + ErrNoOAuth2 = errors.New("the production server requires a google, oauth2 client id to allow admin logins") + ErrNoAccounts = errors.New("the production server has no google oauth2 user accounts to allow admin logins") + ErrZap = errors.New("the zap logger instance is nil") ) // Checks runs a number of sanity checks for the environment variable configurations. @@ -42,38 +36,30 @@ func (c *Config) Checks(logger *zap.SugaredLogger) error { return ErrZap } - if c.HTTPSRedirect && c.TLSPort == 0 { - logger.Warn("HTTPSRedirect is on but the HTTPS port is not set," + - " so the server will not redirect HTTP requests to HTTPS.") - } - c.httpPort(logger) c.tlsPort(logger) c.production(logger) // Check the download, preview and thumbnail directories. - if err := DownloadDir(c.DownloadDir); err != nil { - s := helper.Capitalize(err.Error()) + "." 
- logger.Warn(s) + if err := CheckDir(c.AbsDownload, "downloads"); err != nil { + s := helper.Capitalize(err.Error()) + logger.Error(s) } - if err := PreviewDir(c.PreviewDir); err != nil { - s := helper.Capitalize(err.Error()) + "." - logger.Warn(s) + if err := CheckDir(c.AbsPreview, "previews"); err != nil { + s := helper.Capitalize(err.Error()) + logger.Error(s) } - if err := ThumbnailDir(c.ThumbnailDir); err != nil { - s := helper.Capitalize(err.Error()) + "." - logger.Warn(s) + if err := CheckDir(c.AbsThumbnail, "thumbnails"); err != nil { + s := helper.Capitalize(err.Error()) + logger.Error(s) } // Reminds for the optional configuration values. if c.NoCrawl { - logger.Warn("NoCrawl is on, web crawlers should ignore this site.") + logger.Warn("Disallow search engine crawling is enabled") } - if c.HTTPSRedirect && c.TLSPort > 0 { - logger.Info("HTTPSRedirect is on, all HTTP requests will be redirected to HTTPS.") - } - if c.HostName == postgres.DockerHost { - logger.Info("The application is configured for use in a Docker container.") + if c.ReadOnly { + logger.Warn("The server is running in read-only mode, edits to the database are not allowed") } return c.SetupLogDir(logger) @@ -117,39 +103,26 @@ func (c Config) tlsPort(logger *zap.SugaredLogger) { // expects the server to be configured with OAuth2 and Google IDs. // The server should be running over HTTPS and not unencrypted HTTP. func (c Config) production(logger *zap.SugaredLogger) { - if !c.ProductionMode || c.ReadMode { + if !c.ProdMode || c.ReadOnly { return } if c.GoogleClientID == "" { - s := helper.Capitalize(ErrNoOAuth2.Error()) + "." + s := helper.Capitalize(ErrNoOAuth2.Error()) logger.Warn(s) } if c.GoogleIDs == "" && len(c.GoogleAccounts) == 0 { - s := helper.Capitalize(ErrNoAccounts.Error()) + "." + s := helper.Capitalize(ErrNoAccounts.Error()) logger.Warn(s) } - if c.HTTPPort > 0 { - s := fmt.Sprintf("%s over port %d.", - helper.Capitalize(ErrUnencrypted.Error()), - c.HTTPPort) - logger.Info(s) - } - if c.SessionKey != "" { - s := helper.Capitalize(ErrSessionKey.Error()) + "." - logger.Warn(s) - logger.Warn("This means that all signed in clients will not be logged out on a server restart.") - } - if c.SessionMaxAge > 0 { - logger.Infof("A signed in client session lasts for %d hour(s).", c.SessionMaxAge) - } else { - logger.Warn("A signed in client session lasts forever.") + if c.SessionMaxAge == 0 { + logger.Warn("A signed in client session lasts forever, this is a security risk") } } // LogStore determines the local storage path for all log files created by this web application. 
func (c *Config) LogStore() error { const ownerGroupAll = 0o770 - logs := c.LogDir + logs := c.AbsLog if logs == "" { dir, err := os.UserConfigDir() if err != nil { @@ -162,7 +135,7 @@ func (c *Config) LogStore() error { return fmt.Errorf("%w: %s", err, logs) } } - c.LogDir = logs + c.AbsLog = logs return nil } @@ -173,21 +146,19 @@ func (c *Config) SetupLogDir(logger *zap.SugaredLogger) error { if logger == nil { return ErrZap } - if c.LogDir == "" { + if c.AbsLog == "" { if err := c.LogStore(); err != nil { return fmt.Errorf("%w: %w", ErrLog, err) } - } else { - logger.Info("The server logs are found in: ", c.LogDir) } - dir, err := os.Stat(c.LogDir) + dir, err := os.Stat(c.AbsLog) if os.IsNotExist(err) { - return fmt.Errorf("log directory %w: %s", ErrDirNotExist, c.LogDir) + return fmt.Errorf("log directory %w: %s", ErrDirNotExist, c.AbsLog) } if !dir.IsDir() { return fmt.Errorf("log directory %w: %s", ErrNotDir, dir.Name()) } - empty := filepath.Join(c.LogDir, ".defacto2_touch_test") + empty := filepath.Join(c.AbsLog, ".defacto2_touch_test") if _, err := os.Stat(empty); os.IsNotExist(err) { f, err := os.Create(empty) if err != nil { @@ -222,34 +193,9 @@ func CheckDir(name, desc string) error { if !dir.IsDir() { return fmt.Errorf("%w, %s: %s", ErrDirIs, desc, dir.Name()) } - files, err := os.ReadDir(name) - if err != nil { - return fmt.Errorf("%w, %s: %w", ErrDirRead, desc, err) - } - if len(files) < toFewFiles { - return fmt.Errorf("%w, %s: %s", ErrDirFew, desc, dir.Name()) - } return nil } -// DownloadDir runs checks against the named directory containing the UUID artifact downloads. -// Problems will either log warnings or fatal errors. -func DownloadDir(name string) error { - return CheckDir(name, "download") -} - -// PreviewDir runs checks against the named directory containing the preview and screenshot images. -// Problems will either log warnings or fatal errors. -func PreviewDir(name string) error { - return CheckDir(name, "preview") -} - -// ThumbnailDir runs checks against the named directory containing the thumbnail images. -// Problems will either log warnings or fatal errors. -func ThumbnailDir(name string) error { - return CheckDir(name, "thumbnail") -} - // Validate returns an error if the HTTP or TLS port is invalid. func Validate(port uint) error { const disabled = 0 diff --git a/internal/config/config.go b/internal/config/config.go index d90a4339..19244ceb 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -5,17 +5,21 @@ import ( "crypto/sha512" "errors" "fmt" + "net/url" "reflect" + "sort" "strings" "text/tabwriter" "github.com/Defacto2/server/internal/helper" - "github.com/Defacto2/server/internal/postgres" ) const ( - ConfigDir = "defacto2-app" // ConfigDir is the subdirectory for the home user ".config". - HTTPPort = 1323 // HTTPPort is the default port number for the unencrypted HTTP server. + ConfigDir = "defacto2-app" // ConfigDir is the subdirectory for the home user ".config". + HTTPPort = 1323 // HTTPPort is the default port number for the unencrypted HTTP server. + SessionHours = 3 // SessionHours is the default number of hours for the session cookie to remain active. + MinimumFiles = 40000 // MinimumFiles is the minimum number of unique filenames expected in an asset subdirectory. 
+ hide = "XXXXXXXX" ) var ErrNoPort = errors.New("the server cannot start without a http or a tls port") @@ -24,33 +28,30 @@ var ErrNoPort = errors.New("the server cannot start without a http or a tls port // // [caarlos0/env]:https://github.com/caarlos0/env type Config struct { - Compression string `env:"D2_COMPRESSION" envDefault:"gzip" help:"Enable either gzip or br compression of HTTP/HTTPS responses; you may turn this off if you are using a reverse proxy"` - LogDir string `env:"D2_LOG_DIR" help:"The absolute directory path will store all logs generated by this application"` - DownloadDir string `env:"D2_DOWNLOAD_DIR" help:"The directory path that holds the UUID named files that are served as artifact downloads"` - PreviewDir string `env:"D2_PREVIEW_DIR" help:"The directory path that holds the UUID named image files that are served as previews of the artifact"` - ThumbnailDir string `env:"D2_THUMBNAIL_DIR" help:"The directory path that holds the UUID named squared image files that are served as artifact thumbnails"` + AbsLog string `env:"D2_DIR_LOG" help:"The absolute directory path will store all logs generated by this application"` + AbsDownload string `env:"D2_DIR_DOWNLOAD" help:"The directory path that holds the UUID named files that are served as artifact downloads"` + AbsPreview string `env:"D2_DIR_PREVIEW" help:"The directory path that holds the UUID named image files that are served as previews of the artifact"` + AbsThumbnail string `env:"D2_DIR_THUMBNAIL" help:"The directory path that holds the UUID named squared image files that are served as artifact thumbnails"` + DatabaseURL string `env:"D2_DATABASE_URL" help:"Provide the URL of the database to which to connect"` SessionKey string `env:"D2_SESSION_KEY,unset" help:"Use a fixed session key for the cookie store, which can be left blank to generate a random key"` - GoogleClientID string `env:"D2_GOOGLE_CLIENT_ID" help:"The Google OAuth2 client ID"` + GoogleClientID string `env:"D2_GOOGLE_CLIENT_ID,unset" help:"The Google OAuth2 client ID"` GoogleIDs string `env:"D2_GOOGLE_IDS,unset" help:"Create a comma-separated list of Google account IDs to permit access to the editor mode"` + MatchHost string `env:"D2_MATCH_HOST" help:"Limits connections to the specific host or domain name; leave blank to permit connections from anywhere"` TLSCert string `env:"D2_TLS_CERT" help:"An absolute file path to the TLS certificate, or leave blank to use a self-signed, localhost certificate"` TLSKey string `env:"D2_TLS_KEY" help:"An absolute file path to the TLS key, or leave blank to use a self-signed, localhost key"` - TLSHost string `env:"D2_TLS_HOST" help:"An advised setting limits TLS to the specific host or domain name; leave it blank to permit TLS connections from any host"` - HostName string `env:"PS_HOST_NAME"` // this should only be used internally, instead see postgres.Connection{} - HTTPPort uint `env:"D2_HTTP_PORT" envDefault:"1323" help:"The port number to be used by the unencrypted HTTP web server"` + HTTPPort uint `env:"D2_HTTP_PORT" help:"The port number to be used by the unencrypted HTTP web server"` MaxProcs uint `env:"D2_MAX_PROCS" help:"Limit the number of operating system threads the program can use"` - SessionMaxAge int `env:"D2_SESSION_MAX_AGE" envDefault:"3" help:"List the maximum number of hours for the session cookie to remain active before expiring and requiring a new login"` + SessionMaxAge int `env:"D2_SESSION_MAX_AGE" help:"List the maximum number of hours for the session cookie to remain active before expiring and 
requiring a new login"` TLSPort uint `env:"D2_TLS_PORT" help:"The port number to be used by the encrypted, HTTPS web server"` - ProductionMode bool `env:"D2_PRODUCTION_MODE" help:"Use the production mode to log errors to a file and recover from panics"` - FastStart bool `env:"D2_FAST_START" help:"Skip the database connection and file checks on server startup to speed up the initialization"` - ReadMode bool `env:"D2_READ_ONLY" envDefault:"true" help:"Use the read-only mode to turn off all POST, PUT, and DELETE requests and any related user interface"` + Quiet bool `env:"D2_QUIET" help:"Suppress most startup output to the terminal, intended for use with systemd or other process managers"` + Compression bool `env:"D2_COMPRESSION" help:"Enable gzip compression of the HTTP/HTTPS responses; you may turn this off when using a reverse proxy"` + ProdMode bool `env:"D2_PROD_MODE" help:"Use the production mode to log errors to a file and recover from panics"` + ReadOnly bool `env:"D2_READ_ONLY" help:"Use the read-only mode to turn off all POST, PUT, and DELETE requests and any related user interface"` NoCrawl bool `env:"D2_NO_CRAWL" help:"Tell search engines to not crawl any of website pages or assets"` - LogRequests bool `env:"D2_LOG_REQUESTS" help:"Log all HTTP and HTTPS client requests including those with 200 OK responses"` - HTTPSRedirect bool `env:"D2_HTTPS_REDIRECT" help:"Redirect all HTTP requests to HTTPS"` + LogAll bool `env:"D2_LOG_ALL" help:"Log all HTTP and HTTPS client requests including those with 200 OK responses"` // GoogleAccounts is a slice of Google OAuth2 accounts that are allowed to login. // Each account is a 48 byte slice of bytes that represents the SHA-384 hash of the unique Google ID. GoogleAccounts [][48]byte - // LocalMode is a "go build -ldflags" to fix the server to always run in local mode. - LocalMode bool } const ( @@ -62,14 +63,11 @@ const ( h1 = "Configuration" h2 = "Value" h3 = "Environment variable" - h4 = "Value type" - h5 = "Information" line = "─" - donotuse = 7 - down = "DownloadDir" - logger = "LogDir" - prev = "PreviewDir" - thumb = "ThumbnailDir" + down = "AbsDownload" + logger = "AbsLog" + prev = "AbsPreview" + thumb = "AbsThumbnail" ) // String returns a string representation of the Config struct. @@ -78,7 +76,6 @@ const ( func (c Config) String() string { b := new(strings.Builder) c.configurations(b) - fmt.Fprintf(b, "\n") return b.String() } @@ -117,8 +114,7 @@ func (c Config) addresses(b *strings.Builder, intro bool) error { } const disable, text, secure = 0, 80, 443 for _, host := range hosts { - if c.HostName == postgres.DockerHost && host != "localhost" { - // skip all but localhost when running in docker + if c.MatchHost != "" && host != c.MatchHost { continue } switch port { @@ -129,9 +125,6 @@ func (c Config) addresses(b *strings.Builder, intro bool) error { default: fmt.Fprintf(b, "%shttp://%s:%d\n", pad, host, port) } - if c.TLSHost != "" && host != c.TLSHost { - continue - } switch tls { case secure: fmt.Fprintf(b, "%shttps://%s\n", pad, host) @@ -141,10 +134,10 @@ func (c Config) addresses(b *strings.Builder, intro bool) error { fmt.Fprintf(b, "%shttps://%s:%d\n", pad, host, tls) } } - if c.HostName == postgres.DockerHost { - return nil + if c.MatchHost == "" { + return localIPs(b, port, pad) } - return localIPs(b, port, pad) + return nil } func addrIntro(b *strings.Builder, intro bool) { @@ -172,142 +165,220 @@ func localIPs(b *strings.Builder, port uint64, pad string) error { return nil } -// nl prints a new line to the tabwriter. 
-func nl(w *tabwriter.Writer) { - fmt.Fprintf(w, "\t\t\t\t\n") -} - // dir prints the directory path to the tabwriter or a warning if the path is empty. -func dir(w *tabwriter.Writer, id, s string) { - if s != "" { - fmt.Fprintf(w, "\t\t\tPATH →\t%s\n", s) +func dir(w *tabwriter.Writer, id, name, val string) { + fmt.Fprintf(w, "\t%s\t%s", fmtID(id), name) + if val != "" { + // todo: stat the directory + fmt.Fprintf(w, "\t%s\n", val) return } - fmt.Fprintf(w, "\t\t\tPATH →\t%s", "[NO DIRECTORY SET]") switch id { case down: - fmt.Fprintf(w, "\tNo downloads will be served.\n") + fmt.Fprintf(w, "\tEmpty, no downloads will be served\n") case prev: - fmt.Fprintf(w, "\tNo preview images will be shown.\n") + fmt.Fprintf(w, "\tEmpty, no preview images will be shown\n") case thumb: - fmt.Fprintf(w, "\tNo thumbnails will be shown.\n") + fmt.Fprintf(w, "\tEmpty, no thumbnails will be shown\n") case logger: - fmt.Fprintf(w, "\tLogs will be printed to this terminal.\n") + fmt.Fprintf(w, "\tEmpty, logs print to the terminal (stdout)\n") default: fmt.Fprintln(w) } } -// lead prints the id, name, value and help text to the tabwriter. -func lead(w *tabwriter.Writer, id, name string, val reflect.Value, field reflect.StructField) { - help := field.Tag.Get("help") - fmt.Fprintf(w, "\t%s\t%s\t%v\t%s.\n", helper.SplitAsSpaces(id), name, val, help) -} - -// path prints the file and image paths to the tabwriter. -func path(w *tabwriter.Writer, id, name string, field reflect.StructField) { - help := field.Tag.Get("help") +func fmtID(id string) string { switch id { case down: - help = strings.Replace(help, "UUID named files", "UUID named files\n\t\t\t\t", 1) + return "Downloads, directory path" case prev: - help = strings.Replace(help, "UUID named image", "UUID named image\n\t\t\t\t", 1) + return "Previews, directory path" case thumb: - help = strings.Replace(help, "UUID named squared image", "UUID named squared image\n\t\t\t\t", 1) + return "Thumbnails, directory path" + case logger: + return "Logs, directory path" + case "Compression": + return "Gzip compression" + case "DatabaseURL": + return "Database connection, URL" + case "GoogleClientID": + return "Google OAuth2 client ID" + case "GoogleIDs": + return "Google IDs for sign-in" + case "LogAll": + return "Log all HTTP requests" + case "MaxProcs": + return "Maximum CPU processes" + case "MatchHost": + return "Match hostname, domain or IP address" + case "NoCrawl": + return "Disallow search engine crawling" + case "ProdMode": + return "Production mode" + case "Quiet": + return "Quiet mode" + case "ReadOnly": + return "Read-only mode" + case "SessionKey": + return "Session encryption key" + case "SessionMaxAge": + return "Session, maximum age" + case "TLSCert": + return "TLS certificate, file path" + case "TLSHost": + return "TLS hostname" + case "TLSKey": + return "TLS key, file path" + default: + return helper.SplitAsSpaces(id) } - fmt.Fprintf(w, "\t%s\t%s\t\t%s.\n", helper.SplitAsSpaces(id), name, help) } -// isProd prints a warning if the production mode is disabled. -func isProd(w *tabwriter.Writer, id, name string, val reflect.Value, field reflect.StructField) { - lead(w, id, name, val, field) - if val.Kind() == reflect.Bool && !val.Bool() { - fmt.Fprintf(w, "\t\t\t\t%s\n", - "All errors and warnings will be logged to this console.") +// value prints the id, name, value and help text to the tabwriter. 
+func value(w *tabwriter.Writer, id, name string, val reflect.Value) { + if val.Kind() == reflect.Bool { + status := "Off" + if val.Bool() { + status = "On" + } + fmt.Fprintf(w, "\t%s\t%s\t%v\n", fmtID(id), name, status) + return + } + fmt.Fprintf(w, "\t%s\t%s\t", fmtID(id), name) + switch id { + case "GoogleClientID": + if val.String() == "" { + fmt.Fprint(w, "Empty, no account sign-in for web administration\n") + return + } + fmt.Fprintln(w, hide) + case "MatchHost": + if val.String() == "" { + fmt.Fprint(w, "Empty, no address restrictions\n") + return + } + fmt.Fprintln(w, val.String()) + case "SessionKey": + if val.String() == "" { + fmt.Fprint(w, "Empty, a random key will be generated during the server start\n") + return + } + fmt.Fprintln(w, hide) + case "SessionMaxAge": + fmt.Fprintf(w, "%v hours\n", val.Int()) + case "DatabaseURL": + fmt.Fprintln(w, hidePassword(val.String())) + default: + if val.String() == "" { + fmt.Fprint(w, "Empty\n") + return + } + fmt.Fprintf(w, "%v\n", val) } } // httpPort prints the HTTP port number to the tabwriter. -func httpPort(w *tabwriter.Writer, id, name string, val reflect.Value, field reflect.StructField) { - nl(w) - lead(w, id, name, val, field) - fmt.Fprintf(w, "\t\t\t\t%s\n", - "The typical HTTP port number is 80, while for proxies it is 8080.") +func httpPort(w *tabwriter.Writer, id, name string, val reflect.Value) { + fmt.Fprintf(w, "\t%s\t%s\t", fmtID(id), name) if val.Kind() == reflect.Uint && val.Uint() == 0 { - fmt.Fprintf(w, "\t\t\t\t%s\n", "The server will use the default port number 1323.") + fmt.Fprintf(w, "%s\n", "0, the web server will not use HTTP") + return } + port := val.Uint() + const common = 80 + if port == common { + fmt.Fprintf(w, "%d, the web server will use HTTP, example: http://localhost\n", port) + return + } + fmt.Fprintf(w, "%d, the web server will use HTTP, example: http://localhost:%d\n", port, port) } // tlsPort prints the HTTPS port number to the tabwriter. -func tlsPort(w *tabwriter.Writer, id, name string, val reflect.Value, field reflect.StructField) { - nl(w) - lead(w, id, name, val, field) - fmt.Fprintf(w, "\t\t\t\t%s\n", - "The typical TLS port number is 443, while for proxies it is 8443.") +func tlsPort(w *tabwriter.Writer, id, name string, val reflect.Value) { + fmt.Fprintf(w, "\t%s\t%s\t", fmtID(id), name) if val.Kind() == reflect.Uint && val.Uint() == 0 { - fmt.Fprintf(w, "\t\t\t\t%s\n", "The server will not use TLS.") + fmt.Fprintf(w, "%s\n", "0, the web server will not use HTTPS") + return } + port := val.Uint() + const common = 443 + if port == common { + fmt.Fprintf(w, "%d, the web server will use HTTPS, example: https://localhost\n", port) + return + } + fmt.Fprintf(w, "%d, the web server will use HTTPS, example: https://localhost:%d\n", port, port) +} + +// tlsCert prints the TLS certificate and key locations to the tabwriter. +func tlsCert(w *tabwriter.Writer, id, name string, val reflect.Value, tlsport uint) { + if tlsport == 0 { + fmt.Println(w, "Not used") + return + } + if val.String() == "" { + fmt.Fprintf(w, "\t%s\t%s\tEmpty, will use a placeholder configuration\n", fmtID(id), name) + return + } + value(w, id, name, val) } // maxProcs prints the number of CPU cores to the tabwriter. 
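Editor's note: the rewritten value, httpPort and tlsPort helpers above each print a three-column row (label, environment variable, value) into a text/tabwriter. The toy program below shows the alignment idea in isolation; the writer settings are placeholders, since the package's minwidth, tabwidth and padding constants are not part of this hunk.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Placeholder writer settings standing in for the package constants.
	w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
	fmt.Fprintf(w, "\t%s\t%s\t%s\n", "Configuration", "Environment variable", "Value")
	fmt.Fprintf(w, "\t%s\t%s\t%s\n", "Read-only mode", "D2_READ_ONLY", "On")
	fmt.Fprintf(w, "\t%s\t%s\t%s\n", "HTTP port", "D2_HTTP_PORT",
		"1323, the web server will use HTTP, example: http://localhost:1323")
	w.Flush()
}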
-func maxProcs(w *tabwriter.Writer, id, name string, val reflect.Value, field reflect.StructField) { - nl(w) - fmt.Fprintf(w, "\t%s\t%s\t%v\t%s.", id, name, 0, field.Tag.Get("help")) +func maxProcs(w *tabwriter.Writer, id, name string, val reflect.Value) { + fmt.Fprintf(w, "\t%s\t%s\t", fmtID(id), name) if val.Kind() == reflect.Uint && val.Uint() == 0 { - fmt.Fprintf(w, "\n\t\t\t\t%s\n", "This application will use all available CPU cores.") + fmt.Fprintf(w, "%s\n", "0, the application will use all available CPU threads") + return } + fmt.Fprintf(w, "%d, the application will limit access to CPU threads\n", val.Uint()) } -// googleHead prints a header for the Google OAuth2 configurations. -func googleHead(w *tabwriter.Writer, c Config) { - if !c.ProductionMode && c.ReadMode { - return +// hidePassword replaces the password in the URL with XXXXXs. +func hidePassword(rawURL string) string { + u, err := url.Parse(rawURL) + if err != nil { + return rawURL } - nl(w) - fmt.Fprintf(w, "\t \t \t\t──────────────────────────────────────────────────────────────────────\n") - fmt.Fprintf(w, "\t \t \t\t The following configurations can usually be left at their defaults\n") - fmt.Fprintf(w, "\t \t \t\t──────────────────────────────────────────────────────────────────────") + _, exists := u.User.Password() + if !exists { + return rawURL + } + u.User = url.UserPassword(u.User.Username(), hide) + return u.String() } // configurations prints a list of active configurations options. func (c Config) configurations(b *strings.Builder) *strings.Builder { fields := reflect.VisibleFields(reflect.TypeOf(c)) + sort.Slice(fields, func(i, j int) bool { + return fields[i].Name < fields[j].Name + }) values := reflect.ValueOf(c) + w := tabwriter.NewWriter(b, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprint(b, "Defacto2 server active configuration options.\n\n") - fmt.Fprintf(w, "\t%s\t%s\t%s\t%s\n", - h1, h3, h2, h5) - fmt.Fprintf(w, "\t%s\t%s\t%s\t%s\n", + fmt.Fprint(b, "The Defacto2 server configuration:\n\n") + fmt.Fprintf(w, "\t%s\t%s\t%s\n", + h1, h3, h2) + fmt.Fprintf(w, "\t%s\t%s\t%s\n", strings.Repeat(line, len(h1)), strings.Repeat(line, len(h3)), - strings.Repeat(line, len(h2)), - strings.Repeat(line, len(h5))) + strings.Repeat(line, len(h2))) for _, field := range fields { if !field.IsExported() { continue } switch field.Name { - case "GoogleAccounts", "LocalMode", "HostName": + case "GoogleAccounts": continue default: } - // mode for development and readonly which is set using the go build flags. - if c.LocalMode || (!c.ProductionMode && c.ReadMode) { - if AccountSkip(field.Name) { - continue - } - } - if c.LocalMode && LocalSkip(field.Name) { - continue - } val := values.FieldByName(field.Name) id := field.Name name := field.Tag.Get("env") if before, found := strings.CutSuffix(name, ",unset"); found { name = before } - c.fmtField(w, id, name, val, field) + c.fmtField(w, id, name, val) } w.Flush() return b @@ -316,68 +387,34 @@ func (c Config) configurations(b *strings.Builder) *strings.Builder { // fmtField prints the id, name, value and help text to the tabwriter. 
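Editor's note: hidePassword above masks the credential portion of the database URL before it is echoed back in the configuration listing. A standalone illustration of the same net/url technique, using the example credentials from this diff:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("postgres://root:example@localhost:5432/defacto2_ps")
	if err != nil {
		panic(err)
	}
	// Swap the password for a fixed mask, as the configuration printer does.
	if _, ok := u.User.Password(); ok {
		u.User = url.UserPassword(u.User.Username(), "XXXXXXXX")
	}
	fmt.Println(u.String()) // postgres://root:XXXXXXXX@localhost:5432/defacto2_ps
}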
func (c Config) fmtField(w *tabwriter.Writer, id, name string, - val reflect.Value, field reflect.StructField, + val reflect.Value, ) { + fmt.Fprintf(w, "\t\t\t\t\n") switch id { - case "ProductionMode": - isProd(w, id, name, val, field) case "HTTPPort": - httpPort(w, id, name, val, field) + httpPort(w, id, name, val) case "TLSPort": - tlsPort(w, id, name, val, field) - case down: - nl(w) - path(w, id, name, field) - dir(w, id, c.PreviewDir) - case prev: - nl(w) - path(w, id, name, field) - dir(w, id, c.PreviewDir) - case thumb: - nl(w) - path(w, id, name, field) - dir(w, id, c.ThumbnailDir) - case logger: - nl(w) - path(w, id, name, field) - dir(w, id, c.LogDir) + tlsPort(w, id, name, val) + case "TLSCert", "TLSKey": + tlsCert(w, id, name, val, c.TLSPort) + case down, prev, thumb, logger: + dir(w, id, name, val.String()) case "MaxProcs": - maxProcs(w, id, name, val, field) - googleHead(w, c) + maxProcs(w, id, name, val) + case "GoogleIDs": + l := len(c.GoogleAccounts) + fmt.Fprintf(w, "\t%s\t%s\t", fmtID(id), name) + switch l { + case 0: + fmt.Fprint(w, "Empty, no accounts for web administration\n") + case 1: + fmt.Fprint(w, "1 Google account allowed to sign-in\n") + default: + fmt.Fprintf(w, "%d Google accounts allowed to sign-in\n", l) + } default: - nl(w) - lead(w, id, name, val, field) - } -} - -// LocalSkip skips the configurations that are inaccessible in local mode. -func LocalSkip(name string) bool { - switch name { - case - "ReadMode", - "ProductionMode", - "TLSPort", - "HTTPSRedirect", - "NoCrawl", - logger, - "MaxProcs": - return true - } - return false -} - -// AccountSkip skips the configurations that are not used when using Google OAuth2 -// is not enabled or when the server is in read-only mode. -func AccountSkip(name string) bool { - switch name { - case - "GoogleClientID", - "GoogleIDs", - "SessionKey", - "SessionMaxAge": - return true + value(w, id, name, val) } - return false } // StaticThumb returns the path to the thumbnail directory. @@ -406,29 +443,7 @@ func (c Config) UseTLSLocal() bool { } // Override the configuration settings fetched from the environment. 
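Editor's note: the Override hunk that follows hashes and then clears any supplied D2_GOOGLE_IDS value, and the Config struct comment earlier states that each permitted account is kept only as a 48 byte SHA-384 digest in GoogleAccounts. The sketch below shows that digest step in isolation; the exact helper Override calls for each ID is not visible in this hunk, so the loop is an assumption.

package main

import (
	"crypto/sha512"
	"fmt"
	"strings"
)

func main() {
	// ids stands in for a comma-separated D2_GOOGLE_IDS value.
	ids := "100000000000000000001,100000000000000000002"
	var accounts [][48]byte
	for _, id := range strings.Split(ids, ",") {
		if id == "" {
			continue
		}
		// sha512.Sum384 returns the [48]byte array stored in Config.GoogleAccounts.
		accounts = append(accounts, sha512.Sum384([]byte(id)))
	}
	fmt.Printf("stored %d hashed account(s)\n", len(accounts))
}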
-func (c *Config) Override(localMode bool) { - // Build binary, environment variables overrides using, - // go build -ldflags="-X 'main.LocalMode=true'" - if localMode { - if c.HTTPPort == 0 { - c.HTTPPort = HTTPPort - } - c.LocalMode = true - c.ProductionMode = false - c.ReadMode = true - c.NoCrawl = true - c.LogDir = "" - c.GoogleClientID = "" - c.GoogleIDs = "" - c.SessionKey = "" - c.SessionMaxAge = 0 - c.TLSPort = 0 - c.TLSCert = "" - c.TLSKey = "" - c.HTTPSRedirect = false - c.MaxProcs = 0 - return - } +func (c *Config) Override() { // hash and delete any supplied google ids ids := strings.Split(c.GoogleIDs, ",") for _, id := range ids { @@ -438,6 +453,7 @@ func (c *Config) Override(localMode bool) { c.GoogleIDs = "overwrite placeholder" c.GoogleIDs = "" // empty the string + // set the default HTTP port if both ports are configured to zero if c.HTTPPort == 0 && c.TLSPort == 0 { c.HTTPPort = HTTPPort } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 0dedecb6..85e3342c 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,6 +1,8 @@ package config_test import ( + "fmt" + "io" "io/fs" "os" "path/filepath" @@ -28,14 +30,16 @@ func TestDownloadFS(t *testing.T) { defer os.RemoveAll(dir) // create and test empty, mock image files + const noExt = "" exts := []string{ - ".txt", - ".webp", - ".png", - ".chiptune", - ".zip", - ".tiff", - ".svg", + ".txt", // valid + ".webp", // invalid + ".png", // invalid + ".chiptune", // valid + ".zip", // valid + ".tiff", // invalid + ".svg", // invalid + noExt, // valid } const invalid = "invalid-base-name" for _, ext := range exts { @@ -47,7 +51,9 @@ func TestDownloadFS(t *testing.T) { _ = helper.Touch(cfName) } - const expectedCount = 21 + const expectedCount = 24 + const expectedResult = 12 + i, err := helper.Count(dir) require.NoError(t, err) assert.Equal(t, expectedCount, i) @@ -69,6 +75,7 @@ func TestDownloadFS(t *testing.T) { return nil } err = config.DownloadFS(nil, path) + fmt.Fprintln(io.Discard, path) require.NoError(t, err) return nil }) @@ -77,7 +84,6 @@ func TestDownloadFS(t *testing.T) { i, err = helper.Count(dir) require.NoError(t, err) - const expectedResult = 8 assert.Equal(t, expectedResult, i) } @@ -89,14 +95,16 @@ func TestRemoveDownload(t *testing.T) { defer os.RemoveAll(dir) // create and test empty, mock image files + const noExt = "" exts := []string{ - ".txt", - ".webp", - ".png", - ".chiptune", - ".zip", - ".tiff", - ".svg", + ".txt", // valid + ".webp", // invalid + ".png", // invalid + ".chiptune", // valid + ".zip", // valid + ".tiff", // invalid + ".svg", // invalid + noExt, // valid } const invalid = "invalid-base-name" for _, ext := range exts { @@ -108,7 +116,9 @@ func TestRemoveDownload(t *testing.T) { _ = helper.Touch(cfName) } - const expectedCount = 21 + const expectedCount = 24 + const expectedResult = 12 + i, err := helper.Count(dir) require.NoError(t, err) assert.Equal(t, expectedCount, i) @@ -130,6 +140,7 @@ func TestRemoveDownload(t *testing.T) { return nil } name := filepath.Base(path) + fmt.Fprintln(io.Discard, name, path) err = config.RemoveDownload(name, path) require.NoError(t, err) return nil @@ -139,7 +150,6 @@ func TestRemoveDownload(t *testing.T) { i, err = helper.Count(dir) require.NoError(t, err) - const expectedResult = 8 assert.Equal(t, expectedResult, i) } @@ -210,18 +220,13 @@ func TestOverride(t *testing.T) { c := config.Config{} assert.Empty(t, c) c.GoogleIDs = "googleids,googleids2,googleids3" - c.Override(false) + c.Override() 
// confirm override assert.Empty(t, c.GoogleIDs) // confirm, required default port if not set assert.Equal(t, uint(config.HTTPPort), c.HTTPPort) // defaults - assert.False(t, c.LocalMode) - assert.False(t, c.ReadMode) - - c.Override(true) - assert.True(t, c.LocalMode) - assert.True(t, c.ReadMode) + assert.False(t, c.ReadOnly) } func td(name string) string { @@ -242,7 +247,7 @@ func TestConfig_String(t *testing.T) { t.Parallel() c := config.Config{} s := c.String() - assert.Contains(t, s, "active configuration options") + assert.Contains(t, s, "Defacto2 server configuration") } func TestConfig_Addresses(t *testing.T) { @@ -267,26 +272,6 @@ func TestConfig_Startup(t *testing.T) { assert.Contains(t, s, "http://localhost:8080") } -func TestLocalSkip(t *testing.T) { - t.Parallel() - skip := config.LocalSkip("") - assert.False(t, skip) - skip = config.LocalSkip("readmode") - assert.False(t, skip) - skip = config.LocalSkip("ReadMode") - assert.True(t, skip) -} - -func TestAccountSkip(t *testing.T) { - t.Parallel() - skip := config.AccountSkip("") - assert.False(t, skip) - skip = config.AccountSkip("googleids") - assert.False(t, skip) - skip = config.AccountSkip("GoogleIDs") - assert.True(t, skip) -} - func TestConfig_Checks(t *testing.T) { t.Parallel() c := config.Config{} @@ -300,8 +285,8 @@ func TestConfig_Checks(t *testing.T) { err = c.Checks(logger()) require.NoError(t, err) - c.ReadMode = false - c.ProductionMode = true + c.ReadOnly = false + c.ProdMode = true require.NoError(t, err) err = c.Checks(logger()) require.NoError(t, err) diff --git a/internal/config/error.go b/internal/config/error.go index 3d9be1f8..8124b5d8 100644 --- a/internal/config/error.go +++ b/internal/config/error.go @@ -23,10 +23,10 @@ var ( // CustomErrorHandler handles customer error templates. func (c Config) CustomErrorHandler(err error, ctx echo.Context) { - logger := zaplog.Development().Sugar() - if c.ProductionMode { - root := c.LogDir - logger = zaplog.Production(root).Sugar() + logger := zaplog.Debug().Sugar() + if c.ProdMode { + root := c.AbsLog + logger = zaplog.Store(root).Sugar() } defer func() { _ = logger.Sync() diff --git a/internal/config/repair.go b/internal/config/repair.go index c760274e..ecab05ea 100644 --- a/internal/config/repair.go +++ b/internal/config/repair.go @@ -20,7 +20,9 @@ const ( cfid = "00000000-0000-0000-0000000000000000" // coldfusion uuid example ) -var ErrIsDir = errors.New("is directory") +var ( + ErrIsDir = errors.New("is directory") +) // RepairFS, on startup check the file system directories for any invalid or unknown files. // If any are found, they are removed without warning. 
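Editor's note: CustomErrorHandler above now switches between the zaplog.Debug and zaplog.Store loggers based on ProdMode. Because its signature matches echo.HTTPErrorHandler, it can be assigned to an Echo instance directly; whether the server wires it up exactly like this is outside this diff, so the snippet is only a sketch.

package main

import (
	"github.com/Defacto2/server/internal/config"
	"github.com/labstack/echo/v4"
)

func main() {
	c := config.Config{ProdMode: false}
	e := echo.New()
	// CustomErrorHandler has the func(error, echo.Context) shape Echo expects.
	e.HTTPErrorHandler = c.CustomErrorHandler
	e.Logger.Fatal(e.Start(":1323"))
}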
@@ -28,7 +30,7 @@ func (c Config) RepairFS(logger *zap.SugaredLogger) error { if logger == nil { return ErrZap } - dirs := []string{c.PreviewDir, c.ThumbnailDir} + dirs := []string{c.AbsPreview, c.AbsThumbnail} p, t := 0, 0 for _, dir := range dirs { if _, err := os.Stat(dir); err != nil { @@ -43,12 +45,12 @@ func (c Config) RepairFS(logger *zap.SugaredLogger) error { return RemoveDir(name, path, dir) } switch dir { - case c.PreviewDir: - if filepath.Ext(name) != ".webp" { + case c.AbsPreview: + if filepath.Ext(name) == ".png" { p++ } - case c.ThumbnailDir: - if filepath.Ext(name) != ".webp" { + case c.AbsThumbnail: + if filepath.Ext(name) == ".png" { t++ } } @@ -58,13 +60,24 @@ func (c Config) RepairFS(logger *zap.SugaredLogger) error { return fmt.Errorf("filepath.Walk: %w", err) } switch dir { - case c.PreviewDir: - logger.Infof("The preview directory contains, %d images: %s", p, dir) - case c.ThumbnailDir: - logger.Infof("The thumb directory contains, %d images: %s", t, dir) + case c.AbsPreview: + containsInfo(logger, "preview", p) + case c.AbsThumbnail: + containsInfo(logger, "thumb", t) } } - return DownloadFS(logger, c.DownloadDir) + return DownloadFS(logger, c.AbsDownload) +} + +func containsInfo(logger *zap.SugaredLogger, name string, count int) { + if logger == nil { + return + } + if MinimumFiles > count { + logger.Warnf("The %s directory contains %d files, which is less than the minimum of %d", name, count, MinimumFiles) + return + } + logger.Infof("The %s directory contains %d files", name, count) } // DownloadFS, on startup check the download directory for any invalid or unknown files. @@ -79,20 +92,53 @@ func DownloadFS(logger *zap.SugaredLogger, dir string) error { return fmt.Errorf("filepath.WalkDir: %w", err) } name := d.Name() - if filepath.Ext(name) == "" { - count++ - } if d.IsDir() { return RemoveDir(name, path, dir) } - return RemoveDownload(name, path) + if err = RemoveDownload(name, path); err != nil { + return fmt.Errorf("RemoveDownload: %w", err) + } + if filepath.Ext(name) == "" { + count++ + } + return RenameDownload(name, path) }) if err != nil { return fmt.Errorf("filepath.WalkDir: %w", err) } - if logger != nil { - logger.Infof("The downloads directory contains, %d files: %s", count, dir) + containsInfo(logger, "downloads", count) + return nil +} + +// RenameDownload, rename the download file if the basename uses an invalid coldfusion uuid. +func RenameDownload(basename, absPath string) error { + st, err := os.Stat(absPath) + if err != nil { + return nil + } + if st.IsDir() { + return fmt.Errorf("%w: %s", ErrIsDir, absPath) + } + + ext := filepath.Ext(basename) + rawname, found := strings.CutSuffix(basename, ext) + if !found { + return nil + } + const cflen = len(cfid) // coldfusion uuid length + if len(rawname) != cflen { + return nil + } + + newname, _ := helper.CFToUUID(rawname) + if err := uuid.Validate(newname); err != nil { + return fmt.Errorf("uuid.Validate %q: %w", newname, err) } + dir := filepath.Dir(absPath) + oldpath := filepath.Join(dir, basename) + newpath := filepath.Join(dir, newname+ext) + + rename(oldpath, "renamed invalid cfid", newpath) return nil } @@ -118,7 +164,7 @@ func RemoveDir(name, path, root string) error { // If any are found, they are removed without warning. // Basename must be the name of the file with a valid file extension. // -// Valid file extensions are .chiptune, .txt, and .zip. +// Valid file extensions are none, .chiptune, .txt, and .zip. 
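Editor's note: RenameDownload above depends on helper.CFToUUID, which is not included in this diff. Going by the cfid example constant, a ColdFusion UUID is a 35 character value that drops the fourth hyphen of an RFC 4122 UUID, so the assumed fix-up would look like the sketch below; the real helper may differ.

package main

import "fmt"

// cfToUUID is a hypothetical stand-in for helper.CFToUUID, inserting the
// missing hyphen before the final group of a ColdFusion-style UUID.
func cfToUUID(cf string) string {
	const pos = 23 // position of the dropped hyphen in an RFC 4122 UUID
	if len(cf) != 35 {
		return cf
	}
	return cf[:pos] + "-" + cf[pos:]
}

func main() {
	fmt.Println(cfToUUID("00000000-0000-0000-0000000000000000"))
	// Output: 00000000-0000-0000-0000-000000000000
}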
func RemoveDownload(basename, path string) error { st, err := os.Stat(path) if err != nil { @@ -127,25 +173,10 @@ func RemoveDownload(basename, path string) error { if st.IsDir() { return fmt.Errorf("%w: %s", ErrIsDir, path) } - - const cflen = len(cfid) // coldfusion uuid length - + const filedownload = "" ext := filepath.Ext(basename) switch ext { - case ".chiptune", ".txt": - return nil - } - if filename, found := strings.CutSuffix(basename, ext); found { - if len(filename) == cflen { - filename, _ = helper.CFToUUID(filename) - } - if err1 := uuid.Validate(filename); err1 != nil { - remove(basename, "remove invalid uuid", path) - return nil //nolint:nilerr - } - } - switch ext { - case ".zip": + case filedownload, ".chiptune", ".txt", ".zip": return nil default: remove(basename, "remove invalid ext", path) @@ -197,3 +228,8 @@ func remove(name, info, path string) { fmt.Fprintf(os.Stderr, "%s: %s\n", info, name) defer os.Remove(path) } + +func rename(oldpath, info, newpath string) { + fmt.Fprintf(os.Stderr, "%s: %s\n", info, oldpath) + defer os.Rename(oldpath, newpath) +} diff --git a/internal/postgres/postgres.go b/internal/postgres/postgres.go index b05e665b..86325412 100644 --- a/internal/postgres/postgres.go +++ b/internal/postgres/postgres.go @@ -1,6 +1,7 @@ // Package postgres connects to and interacts with a PostgreSQL database server. // The functions are specific to the Postgres platform rather than more generic or // interchangeable SQL statements. +// // The postgres/models directory is generated by SQLBoiler and should not be modified. package postgres @@ -9,9 +10,6 @@ import ( "errors" "fmt" "net/url" - "reflect" - "strings" - "text/tabwriter" "github.com/caarlos0/env/v10" _ "github.com/jackc/pgx/v5/stdlib" // Use a lowlevel PostgreSQL driver. @@ -24,163 +22,57 @@ var ( ) const ( - EnvPrefix = "PS_" // EnvPrefix is the prefix for all server environment variables. - DockerHost = "host.docker.internal" // DockerHost is the hostname of the internal Docker container. - DriverName = "pgx" // DriverName of the database. - Protocol = "postgres" // Protocol of the database driver. + // DefaultURL is an example PostgreSQL connection string, it must not be used in production. + DefaultURL = "postgres://root:example@localhost:5432/defacto2_ps" + // DriverName of the database. + DriverName = "pgx" + // Protocol of the database driver. + Protocol = "postgres" ) -// Connection details of the PostgreSQL database connection. -type Connection struct { - HostName string `env:"HOST_NAME" envDefault:"localhost" help:"Provide the name of the server host to which to connect"` - Database string `env:"DATABASE" envDefault:"defacto2-ps" help:"Provide the name of the database to which to connect"` - Username string `env:"USERNAME" help:"Provide a username of a database account used to connect"` - Password string `env:"PASSWORD" help:"Provide a password for the database account used to connect"` - Protocol string // Protocol scheme of the PostgreSQL database. Defaults to postgres. - HostPort int `env:"HOST_PORT" envDefault:"5432" help:"Port number the Postgres database server is listening on"` - NoSSLMode bool `env:"NO_SSL" envDefault:"true" help:"Connect to the database using an insecure, plain text connection"` -} - -// Open opens a PostgreSQL database connection. 
-func (c Connection) Open() (*sql.DB, error) { - conn, err := sql.Open(DriverName, c.URL()) - if err != nil { - return nil, fmt.Errorf("sql.Open: %w", err) - } - return conn, nil -} - -// Check the connection values and print any issues or feedback to the logger. -func (c Connection) Check(logger *zap.SugaredLogger, local bool) error { - if logger == nil { - return ErrZap - } - if c.HostName == "" { - logger.Warn("The database connection host name is empty.") - } - if c.HostPort == 0 { - logger.Warn("The database connection host port is set to 0.") - } - if !local && c.NoSSLMode { - logger.Warn("The database connection is using an insecure, plain text connection.") - } - switch { - case c.Username == "" && c.Password != "": - logger.Info("The database connection username is empty but the password is set.") - case c.Username == "": - logger.Info("The database connection username is empty.") - case c.Password == "": - logger.Info("The database connection password is empty.") - } - return nil -} - // New initializes the connection with default values or values from the environment. func New() (Connection, error) { - c := Connection{} - c.Protocol = Protocol - if err := env.ParseWithOptions( - &c, env.Options{Prefix: EnvPrefix}); err != nil { + c := Connection{ + URL: DefaultURL, + } + if err := env.Parse(&c); err != nil { return Connection{}, fmt.Errorf("%w: %w", ErrEnv, err) } - return c, nil } // ConnectDB connects to the PostgreSQL database. func ConnectDB() (*sql.DB, error) { - ds, err := New() + dataSource, err := New() if err != nil { return nil, fmt.Errorf("new connection db: %w", err) } - conn, err := sql.Open(DriverName, ds.URL()) + conn, err := sql.Open(DriverName, dataSource.URL) if err != nil { return nil, fmt.Errorf("sql.Open: %w", err) } return conn, nil } -// URL returns a url used as a PostgreSQL database connection. -// -// An example connection "postgres://username:password@localhost:5432/postgres?sslmode=disable" -func (c Connection) URL() string { - if c.Protocol == "" { - c.Protocol = Protocol - } - var usr *url.Userinfo - if c.Username != "" && c.Password != "" { - usr = url.UserPassword(c.Username, c.Password) - } else if c.Username != "" { - usr = url.User(c.Username) - } - dns := url.URL{ - Scheme: c.Protocol, - User: usr, - Host: fmt.Sprintf("%s:%d", c.HostName, c.HostPort), - Path: c.Database, - } - if c.NoSSLMode { - q := dns.Query() - q.Set("sslmode", "disable") - dns.RawQuery = q.Encode() - } - return dns.String() +// Connection details of the PostgreSQL database connection. +type Connection struct { + URL string `env:"D2_DATABASE_URL"` // unsetting this value will cause the default to be used after a single use } -// Configurations prints a list of active connection configurations. -func (c Connection) Configurations(b *strings.Builder) *strings.Builder { - const ( - minwidth = 2 - tabwidth = 4 - padding = 2 - padchar = ' ' - flags = 0 - h1 = "Configuration" - h2 = "Value" - h3 = "Environment variable" - h4 = "Value type" - h5 = "Information" - line = "─" - donotuse = 7 - ) - - fields := reflect.VisibleFields(reflect.TypeOf(c)) - values := reflect.ValueOf(c) - w := tabwriter.NewWriter(b, minwidth, tabwidth, padding, padchar, flags) - nl := func() { - fmt.Fprintf(w, "\t\t\t\t\n") +// Validate the connection URL and print any issues to the logger. 
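Editor's note: with Connection reduced to a single URL field, opening the database comes down to environment parsing plus sql.Open with the pgx stdlib driver, as ConnectDB above does. A self-contained sketch of that flow, using the example credentials from the diff as the fallback when D2_DATABASE_URL is unset; no live database is assumed.

package main

import (
	"database/sql"
	"log"

	"github.com/caarlos0/env/v10"
	_ "github.com/jackc/pgx/v5/stdlib" // low-level pgx driver, as used above
)

// connection mirrors the simplified postgres.Connection with one URL field.
type connection struct {
	URL string `env:"D2_DATABASE_URL"`
}

func main() {
	// Pre-seed the default; env.Parse leaves it untouched if the variable is unset.
	c := connection{URL: "postgres://root:example@localhost:5432/defacto2_ps"}
	if err := env.Parse(&c); err != nil {
		log.Fatal(err)
	}
	db, err := sql.Open("pgx", c.URL)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		log.Fatal(err) // expected when no PostgreSQL server is running
	}
	log.Println("connected")
}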
+func (c Connection) Validate(logger *zap.SugaredLogger) error { + if logger == nil { + return ErrZap } - - fmt.Fprint(b, "PostgreSQL database connection configuration.\n\n") - fmt.Fprintf(w, "\t%s\t%s\t%s\t%s\n", - h1, h3, h2, h5) - fmt.Fprintf(w, "\t%s\t%s\t%s\t%s\n", - strings.Repeat(line, len(h1)), - strings.Repeat(line, len(h3)), - strings.Repeat(line, len(h2)), - strings.Repeat(line, len(h5))) - - for _, field := range fields { - if !field.IsExported() { - continue - } - help := field.Tag.Get("help") - if help == "" { - continue - } - val := values.FieldByName(field.Name) - id := field.Name - name := EnvPrefix + field.Tag.Get("env") - lead := func() { - fmt.Fprintf(w, "\t%s\t%s\t%v\t%s.\n", id, name, val, help) - } - if id == "Password" && val.String() == c.Password { - fmt.Fprintf(w, "\t%s\t%s\t%v\t%s.\n", id, name, "******", help) - continue - } - lead() + if c.URL == "" { + logger.Warn("The database connection URL is empty") } - nl() - w.Flush() - return b + u, err := url.Parse(c.URL) + if err != nil { + logger.Warn("The database connection URL is invalid, ", err) + return nil //nolint:nilerr + } + if u.Scheme != Protocol { + logger.Warnf("The database connection scheme is not: %s", Protocol) + } + return nil } diff --git a/internal/zaplog/zaplog.go b/internal/zaplog/zaplog.go index 67272718..c539b832 100644 --- a/internal/zaplog/zaplog.go +++ b/internal/zaplog/zaplog.go @@ -57,8 +57,8 @@ const ( ) */ -// CLI logger prints all log levels to stdout but without callers. -func CLI() *zap.Logger { +// Status logger prints all log levels to stdout but without callers. +func Status() *zap.Logger { enc := consoleNoTime() defaultLogLevel := zapcore.InfoLevel core := zapcore.NewTee( @@ -71,9 +71,23 @@ func CLI() *zap.Logger { return zap.New(core) } -// Development logger prints all log levels to stdout. -func Development() *zap.Logger { - enc := console() +// Timestamp logger prints all log levels to stdout but without callers. +func Timestamp() *zap.Logger { + enc := consoleWithTime() + defaultLogLevel := zapcore.InfoLevel + core := zapcore.NewTee( + zapcore.NewCore( + enc, + zapcore.AddSync(os.Stdout), + defaultLogLevel, + ), + ) + return zap.New(core) +} + +// Debug logger prints all log levels to stdout. +func Debug() *zap.Logger { + enc := consoleWithTime() defaultLogLevel := zapcore.DebugLevel core := zapcore.NewTee( zapcore.NewCore( @@ -85,17 +99,17 @@ func Development() *zap.Logger { return zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel)) } -// Production logger prints all info and higher log levels to files. +// Store logger prints all info and higher log levels to files. // Fatal and Panics are also returned to os.Stderr. 
-func Production(root string) *zap.Logger { +func Store(absPath string) *zap.Logger { config := zap.NewProductionEncoderConfig() config.EncodeTime = zapcore.TimeEncoderOfLayout("Jan-02-15:04:05.00") jsonEnc := zapcore.NewJSONEncoder(config) - enc := console() + enc := consoleWithTime() // server breakage log serverWr := zapcore.AddSync(&lumberjack.Logger{ - Filename: filepath.Join(root, ServerLog), + Filename: filepath.Join(absPath, ServerLog), MaxSize: MaxSizeMB, MaxBackups: MaxBackups, MaxAge: MaxDays, @@ -105,7 +119,7 @@ func Production(root string) *zap.Logger { // information and warning log infoWr := zapcore.AddSync(&lumberjack.Logger{ - Filename: filepath.Join(root, InfoLog), + Filename: filepath.Join(absPath, InfoLog), MaxSize: MaxSizeMB, MaxBackups: MaxBackups, MaxAge: MaxDays, @@ -127,7 +141,7 @@ func Production(root string) *zap.Logger { } // console returns a logger in color and time. -func console() zapcore.Encoder { +func consoleWithTime() zapcore.Encoder { config := zap.NewDevelopmentEncoderConfig() config.EncodeTime = zapcore.TimeEncoderOfLayout("15:04:05") config.EncodeLevel = zapcore.CapitalColorLevelEncoder diff --git a/internal/zaplog/zaplog_test.go b/internal/zaplog/zaplog_test.go index 552d8de6..333d5cad 100644 --- a/internal/zaplog/zaplog_test.go +++ b/internal/zaplog/zaplog_test.go @@ -8,16 +8,16 @@ import ( ) func TestCLI(t *testing.T) { - logr := zaplog.CLI() + logr := zaplog.Status() assert.NotNil(t, logr) } func TestLog(t *testing.T) { - logr := zaplog.Development() + logr := zaplog.Debug() assert.NotNil(t, logr) } func TestProduction(t *testing.T) { - logr := zaplog.Production("") + logr := zaplog.Store("") assert.NotNil(t, logr) } diff --git a/public/image/milestone/cyber_strike_the_tornato.png b/public/image/milestone/cyber_strike_the_tornato.png new file mode 100644 index 00000000..b6b604fc Binary files /dev/null and b/public/image/milestone/cyber_strike_the_tornato.png differ diff --git a/public/image/milestone/cyber_strike_the_tornato.webp b/public/image/milestone/cyber_strike_the_tornato.webp new file mode 100644 index 00000000..a1600bce Binary files /dev/null and b/public/image/milestone/cyber_strike_the_tornato.webp differ diff --git a/public/image/milestone/dungbeetles.png b/public/image/milestone/dungbeetles.png deleted file mode 100644 index e684c021..00000000 Binary files a/public/image/milestone/dungbeetles.png and /dev/null differ diff --git a/public/image/milestone/dungbeetles.webp b/public/image/milestone/dungbeetles.webp deleted file mode 100644 index 4d48890d..00000000 Binary files a/public/image/milestone/dungbeetles.webp and /dev/null differ diff --git a/public/image/milestone/tcommand.png b/public/image/milestone/tcommand.png new file mode 100644 index 00000000..b63f36f0 Binary files /dev/null and b/public/image/milestone/tcommand.png differ diff --git a/server.go b/server.go index cf5ec7b2..d42e8dd9 100644 --- a/server.go +++ b/server.go @@ -1,23 +1,27 @@ -// Package main is the entry point for the Defacto2 server application. -// -// Use the Task runner / build tool (https://taskfile.dev) to build or run the source code. -// $ task --list -// -// Repository: https://github.com/Defacto2/server -// Website: https://defacto2.net -// License: -// -// © Defacto2, 2024 package main +/* +Package main is the entry point for the Defacto2 server application. + +Use the Task runner / build tool (https://taskfile.dev) to build or run the source code. 
+$ task --list + +Repository: https://github.com/Defacto2/server +Website: https://defacto2.net +License: + +© Defacto2, 2024 +*/ + import ( "context" + "database/sql" "embed" "errors" "fmt" + "io" "os" "runtime" - "strconv" "strings" "github.com/Defacto2/server/cmd" @@ -47,13 +51,6 @@ var view embed.FS // version is generated by the GoReleaser ldflags. var version string -// LocalMode is used to always override the PRODUCTION_MODE and READ_ONLY environment variables. -// It removes the option to set a number environment variables when running the server locally. -// This is set using the -ldflags option when building the app. -// -// Example, go build -ldflags="-X 'main.LocalMode=true'", this will set the LocalMode variable to true. -var LocalMode string //nolint:gochecknoglobals - var ( ErrCmd = errors.New("the command given did not work") ErrDB = errors.New("could not initialize the database data") @@ -71,19 +68,33 @@ func main() { if code := parseFlags(logger, configs); code >= 0 { os.Exit(code) } + var w io.Writer = os.Stdout + if configs.Quiet { + w = io.Discard + } + fmt.Fprintf(w, "%s\n", configs) - defer sanityChecks(logger, configs) - defer repairChecks(logger, configs) + db, err := postgres.ConnectDB() + if err != nil { + logger.Errorf("%s: %s", ErrDB, err) + } + defer db.Close() + var ver postgres.Version + if err := ver.Query(); err != nil { + logger.Errorf("ver.Query: %w", err) + } + + repairChecks(logger, db, configs) + sanityChecks(logger, configs) - logger = serverLog(configs) - website := newInstance(configs) + website := newInstance(configs, db) + logger = serverLog(configs, website.RecordCount) router := website.Controller(logger) - website.Info(logger) + website.Info(logger, w) if err := website.Start(router, logger, configs); err != nil { logger.Fatalf("%s: please check the environment variables.", err) } - w := os.Stdout go func() { localIPs, err := configs.Addresses() if err != nil { @@ -91,23 +102,25 @@ func main() { } fmt.Fprintf(w, "%s\n", localIPs) }() - - if localMode() { - go func() { - fmt.Fprint(w, "Tap Ctrl + C, to exit at anytime.\n") - }() - } website.ShutdownHTTP(router, logger) } // environmentVars is used to parse the environment variables and set the Go runtime. +// Defaults are used if the environment variables are not set. func environmentVars() (*zap.SugaredLogger, config.Config) { - logger := zaplog.Development().Sugar() - configs := config.Config{} + logger := zaplog.Status().Sugar() + configs := config.Config{ + Compression: true, + DatabaseURL: postgres.DefaultURL, + HTTPPort: config.HTTPPort, + ProdMode: true, + ReadOnly: true, + SessionMaxAge: config.SessionHours, + } if err := env.Parse(&configs); err != nil { logger.Fatalf("%w: %s", ErrEnv, err) } - configs.Override(localMode()) + configs.Override() if i := configs.MaxProcs; i > 0 { runtime.GOMAXPROCS(int(i)) @@ -116,7 +129,7 @@ func environmentVars() (*zap.SugaredLogger, config.Config) { } // newInstance is used to create the server controller instance. 
-func newInstance(configs config.Config) handler.Configuration { +func newInstance(configs config.Config, db *sql.DB) handler.Configuration { c := handler.Configuration{ Brand: brand, Environment: configs, @@ -127,7 +140,7 @@ func newInstance(configs config.Config) handler.Configuration { if c.Version == "" { c.Version = cmd.Commit("") } - c.RecordCount = recordCount() + c.RecordCount = recordCount(db) return c } @@ -151,21 +164,21 @@ func parseFlags(logger *zap.SugaredLogger, configs config.Config) int { } // sanityChecks is used to perform a number of sanity checks on the file assets and database. -// These are skipped if the FastStart environment variable is set. +// These are skipped if the Production mode environment variable is set.to false. func sanityChecks(logger *zap.SugaredLogger, configs config.Config) { - if configs.FastStart || logger == nil { + if !configs.ProdMode || logger == nil { return } if err := configs.Checks(logger); err != nil { logger.Errorf("%s: %s", ErrEnv, err) } - checks(logger, configs.ReadMode) + checks(logger, configs.ReadOnly) conn, err := postgres.New() if err != nil { logger.Errorf("%s: %s", ErrDB, err) return } - _ = conn.Check(logger, localMode()) + _ = conn.Validate(logger) } // checks is used to confirm the required commands are available. @@ -198,76 +211,47 @@ func checks(logger *zap.SugaredLogger, readonly bool) { } // repairChecks is used to fix any known issues with the file assets and the database entries. -// These are skipped if the FastStart environment variable is set. -func repairChecks(logger *zap.SugaredLogger, configs config.Config) { - if configs.FastStart || logger == nil { +// These are skipped if the Production mode environment variable is set to false. +func repairChecks(logger *zap.SugaredLogger, db *sql.DB, configs config.Config) { + if !configs.ProdMode || logger == nil { return } if err := configs.RepairFS(logger); err != nil { logger.Errorf("%s: %s", ErrFS, err) } - if err := repairDB(logger); err != nil { + if err := repairDB(logger, db); err != nil { repairdb(logger, err) } } // serverLog is used to setup the logger for the server and print the startup message. -func serverLog(configs config.Config) *zap.SugaredLogger { - logger := zaplog.Development().Sugar() - const welcome = "Welcome to the local Defacto2 web application." - logger.Info(welcome) - if localMode() { - return logger - } - mode := "read-only mode" - if !configs.ReadMode { - mode = "write mode" +func serverLog(configs config.Config, count int) *zap.SugaredLogger { + logger := zaplog.Timestamp().Sugar() + const welcome = "Welcome to the Defacto2 web application" + switch { + case count == 0: + logger.Error(welcome + " with no database records") + case config.MinimumFiles > count: + logger.Warnf(welcome+" with only %d records, expecting at least %d+", count, config.MinimumFiles) + default: + logger.Infof(welcome+" containing %d records", count) } - switch configs.ProductionMode { - case true: + if configs.ProdMode { if err := configs.LogStore(); err != nil { logger.Fatalf("%w: %s", ErrLog, err) } - logger = zaplog.Production(configs.LogDir).Sugar() - s := "The server is running in a " - s += strings.ToUpper("production, "+mode) + "." - logger.Info(s) - default: - s := "The server is running in a " - s += strings.ToUpper("development, "+mode) + "." - logger.Warn(s) + logger = zaplog.Store(configs.AbsLog).Sugar() } return logger } -// localMode is used to always override the PRODUCTION_MODE and READ_ONLY environment variables. 
-func localMode() bool { - val, err := strconv.ParseBool(LocalMode) - if err != nil { - return false - } - return val -} - // repairDB on startup checks the database connection and make any data corrections. -func repairDB(logger *zap.SugaredLogger) error { +func repairDB(logger *zap.SugaredLogger, db *sql.DB) error { if logger == nil { return fmt.Errorf("%w: %s", ErrLog, "no logger") } - db, err := postgres.ConnectDB() - if err != nil { - return fmt.Errorf("postgres.ConnectDB: %w", err) - } - defer db.Close() - var ver postgres.Version - if err := ver.Query(); err != nil { - return ErrVer - } - if localMode() { - return nil - } ctx := context.Background() - err = fix.All.Run(ctx, logger, db) + err := fix.All.Run(ctx, logger, db) if err != nil { return fmt.Errorf("fix.All.Run: %w", err) } @@ -287,12 +271,7 @@ func repairdb(logger *zap.SugaredLogger, err error) { } // recordCount returns the number of records in the database. -func recordCount() int { - db, err := postgres.ConnectDB() - if err != nil { - return 0 - } - defer db.Close() +func recordCount(db *sql.DB) int { ctx := context.Background() fs, err := models.Files(qm.Where(model.ClauseNoSoftDel)).Count(ctx, db) if err != nil { diff --git a/view/app/categories.tmpl b/view/app/categories.tmpl index 9043a1ed..df4a0d07 100644 --- a/view/app/categories.tmpl +++ b/view/app/categories.tmpl @@ -10,10 +10,80 @@ {{- /* note row-cols-sm-* is the smallest row-cols value */}} {{- $rowCols := "row row-cols-1 row-cols-sm-2 row-cols-md-5 g-3 p-3 g-md-0 p-md-0"}}
It seems to be unprotected; if you find anything, leave us a message.
+
+
Cracktros and loaders are mini adverts created by cracking groups to announce their @@ -41,38 +111,14 @@ As software repackaging by Scene groups became more complicated, there was a need to develop installation programs that walk the users through installing their ill-gotten wares.
Browse the installers -- Demoscene productions are artistic apps that generate visuals and audio to show the creators' programming, audio, and graphic abilities. Early demo productions developed within warez scene before they distanced themselves and spun into their own community. -
- Browse the demos - - {{- if .stats }} -Text files are generally in every scene release. @@ -88,28 +134,21 @@ A proof of a release is usually a photograph to verify that physical media sold in retail stores is the source of the warez release.
Browse the proofs - {{- if .stats }}+ Demoscene productions are artistic apps that generate visuals and audio to show the creators' programming, audio, and graphic abilities. Early demo productions developed within the warez scene before they distanced themselves and spun into their own community. 
+ Browse the demosStylized ANSI text art was commonly created for the theming and advertising of Bulletin Board Systems. @@ -127,29 +166,17 @@
- Bulletin Board System swere like proto-websites that offered message forums, real-time chat, and file sharing but used a text user interface and ran on the landline telephone network. + Bulletin Board Systems were like proto-websites that offered message forums, real-time chat, and file sharing but used a text user interface and ran on the landline telephone network. Users had to use their PCs or microcomputers with a modem peripheral to dial and connect to the individual boards.
Browse the adverts @@ -172,27 +199,12 @@FTP sites eventually supplanted BBS boards as The Scene moved onto the Internet in the early to mid-1990s. The utilitarian sites were more efficient at organizing and transferring files but lacked the social features of the earlier boards.
Browse the adverts - {{- if .stats }} -Browse the files for {{msdos}}
- {{- if .stats }} -dos date range: {{$rels.MsDos.MinYear}}-{{$rels.MsDos.MaxYear}} and dos count: {{ byteFile $rels.MsDos.Count $rels.MsDos.Bytes }}
- {{- end }}windows date range: {{$rels.Windows.MinYear}}-{{$rels.Windows.MaxYear}} and windows count: {{ byteFile $rels.Windows.Count $rels.Windows.Bytes }}
- {{- end }}mac date range: {{$rels.Macos.MinYear}}-{{$rels.Macos.MaxYear}} and mac count: {{ byteFile $rels.Macos.Count $rels.Macos.Bytes }}
- {{- end }}linux date range: {{$rels.Linux.MinYear}}-{{$rels.Linux.MaxYear}} and linux count: {{ byteFile $rels.Linux.Count $rels.Linux.Bytes }}
- {{- end }}Browse the shell scripts and software
- {{- if .stats }} -script date range: {{$rels.Script.MinYear}}-{{$rels.Script.MaxYear}} and script count: {{ byteFile $rels.Script.Count $rels.Script.Bytes }}
- {{- end }}java date range: {{$rels.Java.MinYear}}-{{$rels.Java.MaxYear}} and java count: {{ byteFile $rels.Java.Count $rels.Java.Bytes }}
- {{- end }}- You're at the website preserving the historic cracking and "warez" scene subcultures on personal computers. - It covers digital artifacts, including text files, demos, music, art, magazines, and other projects. - The nature of historical software piracy, with high churn and elusiveness, - means that the topic needed purposeful documentation, and that's what we do at Defacto2. + You're at the website preserving historic software cracking, wares, and Warez Scene subcultures on personal computers. + It is home to digital artifacts, including text files, demos, music, art, magazines, and other projects. + The nature of microcomputer software piracy, with high churn and elusiveness, + means that the topic needs purposeful documentation, and that's what is done at Defacto2.
Be aware that the occasional hosted file is NSFW, with lewd comments or imagery.
The remainder of this page chronologically shows the milestones for the microcomputer - industry and software piracy to offer insight into the birth of the Scene and personal computing. + industry and software piracy to offer insight into the birth of the Scene and personal computing. The events are not definitive but are based on the digital artifacts collected. - In the early days of modern computing, the terms micro-computer and personal-computer were interchangeable, - but these computing appliances all evolved from the more powerful mini-computers of the 1970s. + In the early days of modern computing, the terms microcomputer and personal computer were interchangeable. + Yet all these primitive computing appliances evolved from the unaffordable and unwieldy business minicomputers of the 1970s.
- The Scene most probably originated in the USA in 1979 or 1980 on the Apple II computer platform. - While well-known, it wasn't the best-selling machine of the early microcomputer generation. - However, the system attracted classic hackers and curious personality types for various reasons. - It was the genesis of an era for influential computer game development, - early copy protection, piracy and online culture on computerized bulletin board systems. + The Scene most probably originated in the USA in 1979 or 1980 on the Apple II computer platform, + with the catalyst being the popularity of the platform's new floppy disc drive, + the inclusion of disk copy-protection, the availability of modems and usable software, + and the formation of early online messaging using computerized bulletin board systems. + It was the genesis of a new era for influential computer game development, + early copy protection, piracy groups, and online culture. + While famous today, the Apple wasn't the best-selling machine of the early microcomputer generation. + However, the system attracted classic hacker personalities and the technology-curious for diverse reasons.
- This period also saw the Atari 400/800 microcomputers introduction. + This period also saw the introduction of Atari's 400 and 800 microcomputers. Atari was late in releasing its graphically superior line of machines, so it created several non-interactive demonstration software titles with music and animation intended to sell the machines in-store. To encourage development, Atari formed APX, the Atari Program Exchange, which allowed Atari to publish user-written software. - Some titles, such as 1981's Graphics/Sound Demonstration, include source code and instructions for various effects to encourage new programmers to use the machines and demonstrate their capabilities, much like a Demoscene that later evolved. + Some titles, such as 1981's Graphics/Sound Demonstration, include source code and instructions for various programming effects to encourage new owners to use the machines and demonstrate their capabilities, much like a Demoscene that later evolved.
- The Scene concept spanned the Atlantic to Western Europe in 1984-85 to eventually thrive on the Commodore 64, + The Scene concept spanned the Atlantic to Western Europe in 1984-85 to eventually thrive on the Commodore 64, the world's all-time, highest-selling computer for decades. - North America and other markets also had their own booming Scene community on the Commodore, - but it's uncertain if they all materialized organically. + Small collectives of Commodore owners in Sweden, West Germany, and elsewhere would team up to import boxed software from the USA to digitally duplicate, occasionally crack, and repackage titles to share between friends and users. + Initially, this was due to the poor availability of retail software. + However, the communities that formed around exchanging pirated software made the Scene too compelling for many, even after the retail situation improved.
- Late in the decade, UK and European game developers and sceners moved - onto the more powerful 16-bit computer platforms by Atari and Commodore. + Late in the 1980s, UK and European game developers and Sceners moved + onto the more powerful 16-bit computer platforms led by Atari and Commodore. Due to the emphasis of sound and graphics on both machines, some in the European Scene pivoted to exclusively producing - digital artwork and multimedia, creating the Demoscene. -
+ digital artwork and multimedia, creating the Demoscene. In the USA, where Atari and Commodore were based, their 16-bit computers failed in the local marketplace. The failure and other poor decisions eventually finished both companies and their influence. - North American consumers moved on to the business-oriented IBM PC platform, - later dominated by Intel and Microsoft with DOS and Windows. - And Apple was left as a niche player after it ditched its profitable Apple II platform - to favor the novel Macintosh computer line. + Meanwhile, Apple was left as a niche player after it ditched its popular Apple II platform to favor the novel Macintosh computer line. +
- Due to its modular and fragmented design, the Intel - Microsoft / IBM PC wasn't the best computer gaming platform during the 1980s and early 1990s. + North American consumers moved on to the business-oriented IBM PC platform, + later dominated by Intel and Microsoft with DOS and Windows. + Due to its modular and fragmented design, the PC wasn't the best computer gaming platform during the 1980s and early 1990s. And apart from the popular adventure and flight simulator genres, there were better choices for game development. - Instead, many North American gamers shifted to the Japanese video game console offerings by Nintendo and later Sega and Sony. + Instead, many American and Canadian gamers shifted to the Japanese video game console offerings by Nintendo and later Sega and Sony.
For the PC Scene, documentation for removing disk copy protection routines goes back to 1982, when individuals released text files and posted messages on USA bulletin board systems.
@@ -65,13 +67,13 @@
and even then, it took until 1988-89 to gain momentum.
Around this time, an Art Scene emerged on the PC,
creating text art for use on the elite
pirate and hacker bulletin board systems.
- However, they later broke away into a competitive community that produced art for art's sake.
+ However, they later broke away into a competitive community that produced art for art's sake.
- The European sceners only reluctantly joined the PC in and around 1990, - with most avoiding the platform until years later, only after it became apparent + The European Sceners only reluctantly joined the PC in and around 1990, + with most avoiding the platform until years later, and only after it became apparent that the software industry had moved on to the Intel - Microsoft PC and video game consoles. - Yet this convergence of Europeans, North Americans, and other nationalities on the same computer platform and later, the emerging home-access Internet formed one of the first global online communities. + Yet this convergence of Europeans, North Americans, and other nationalities on the same computer platform and, later, the emerging home-access Internet began one of the first global online communities.
- Do you have any files that Defacto2 could use? - You can use our Uploader to send us files we should host. + Do you have any Scene files that Defacto2 could use? You can use our Uploader to send us files we should host. + A person manually approves all uploads, so files may take time to become available on the site. + But please, no inappropriate files or modern programs that break the DMCA.
-- A human manually approves all uploads, so it may take time for them to be available on the site. - We will filter through what we can use: cracktros, intros, magazines, reports, text files, documents, articles, artwork or Scene related.
-But nothing the breaks the DMCA or inappropriate.
- + +There is a 100MB per file upload limit. Please contact us if you need access to an SFTP server to bypass this limit.
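As a quick, hedged illustration of the reworked database bootstrap in internal/postgres above, here is a minimal usage sketch. It is not part of the patch: it assumes the snippet lives inside this module (the internal packages cannot be imported from outside github.com/Defacto2/server) and that the D2_DATABASE_URL environment variable may or may not be set; when it is unset, postgres.New keeps the postgres.DefaultURL placeholder.

package main

import (
	"fmt"

	"github.com/Defacto2/server/internal/postgres"
	"github.com/Defacto2/server/internal/zaplog"
)

func main() {
	logger := zaplog.Status().Sugar()
	// Parse D2_DATABASE_URL from the environment, or keep the DefaultURL placeholder.
	conn, err := postgres.New()
	if err != nil {
		logger.Fatalf("parse the connection settings: %s", err)
	}
	// Validate only logs warnings for an empty URL or a non-postgres scheme; it does not abort.
	if err := conn.Validate(logger); err != nil {
		logger.Fatalf("validate the connection settings: %s", err)
	}
	// Open a *sql.DB handle backed by the pgx driver, as ConnectDB does for server.go at startup.
	db, err := postgres.ConnectDB()
	if err != nil {
		logger.Fatalf("connect to the database: %s", err)
	}
	defer db.Close()
	fmt.Println("database connection opened")
}

The three calls roughly mirror the new startup order in server.go and its sanity checks: parse the environment, validate with warnings only, then open the connection.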