extract thought into its own package, standardize it
Jes Olson j3s@c3f.net
Fri, 06 Oct 2023 17:27:33 -0500
5 files changed, 146 insertions(+), 88 deletions(-)
M atom/atom.go → atom/atom.go
@@ -1,46 +1,24 @@
 package atom
 
 import (
-	"bufio"
 	"fmt"
 	"log"
 	"net/http"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"time"
+
+	"git.j3s.sh/j3s.sh/thought"
 )
 
 const maxFeed = 10
 
-var timeLayoutPost = "2006-01-02"
-var timeLayoutAtom = "2006-01-02T15:04:05.000Z"
-
-type post struct {
-	title   string
-	updated string
-	link    string
-	// starting with content disabled because a lot of my content is nonsensical
-	// to any reasonable xml parser tbh
-	// content string
-}
-
 func Handler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/atom+xml")
 
-	posts, err := getPosts("thought/*")
+	posts, err := thought.Posts()
 	if err != nil {
 		log.Println(err)
+		return
 	}
-
-	sort.Slice(posts, func(i, j int) bool {
-		// we assume that timeLayoutAtom is correct here because
-		// it was passed up correctly hopefully
-		ti, _ := time.Parse(timeLayoutAtom, posts[i].updated)
-		tj, _ := time.Parse(timeLayoutAtom, posts[j].updated)
-		return ti.After(tj)
-	})
+	thought.DateSort(posts)
 
 	if len(posts) > maxFeed {
 		posts = posts[:maxFeed]
@@ -48,7 +26,7 @@ }
 
 	var updated string
 	if len(posts) > 0 {
-		updated = posts[0].updated
+		updated = posts[0].Updated
 	}
 
 	fmt.Fprintf(w, `<?xml version="1.0" encoding="utf-8"?>
@@ -64,8 +42,8 @@ </author>
   <id>https://j3s.sh/</id>
 `, updated)
 
+	// maybe add p.Content here someday
 	for _, p := range posts {
-		// add content in future? idk prob not
 		fmt.Fprintf(w, `
   <entry>
     <title>%s</title>
@@ -73,58 +51,8 @@ <link href="%s"/>
    <id>%s</id>
    <updated>%s</updated>
  </entry>
-`, p.title, p.link, p.link, p.updated)
+`, p.Title, p.Link, p.Link, p.Updated)
 	}
 	fmt.Fprint(w, `
 </feed>`)
 }
-
-func getPosts(dir string) ([]post, error) {
-	var posts []post
-	files, err := filepath.Glob(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, f := range files {
-		post, err := fileToPost(f)
-		if err != nil {
-			// this can be failed intentionally for drafts or posts too small for rss updoots
-			// add a print statement here if blog posts mysteriously vanish from the atom feed
-			//
-			// lmao
-			continue
-		}
-		posts = append(posts, post)
-	}
-
-	return posts, err
-}
-
-func fileToPost(file string) (post, error) {
-	var p post
-
-	f, err := os.Open(file)
-	if err != nil {
-		return p, err
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	scanner.Scan() // this moves to the next token
-	// strip spaces
-	title := scanner.Text()
-	p.title = strings.TrimSpace(title)
-	scanner.Scan()
-	timestr := strings.TrimSpace(scanner.Text())
-	updated, err := time.Parse(timeLayoutPost, timestr)
-	if err != nil {
-		return p, err
-	}
-	// for scanner.Scan() {
-	// 	p.content = p.content + scanner.Text()
-	// }
-	p.updated = updated.Format(timeLayoutAtom)
-	p.link = "https://j3s.sh/" + file
-	return p, err
-}
M main.go → main.go
@@ -11,6 +11,7 @@ "log"
 	"math/rand"
 	"net/http"
 	"os"
+	"path"
 	"path/filepath"
 	"strings"
 	"time"
@@ -20,11 +21,9 @@ "filippo.io/age/armor"
 	"git.j3s.sh/j3s.sh/atom"
 	"git.j3s.sh/j3s.sh/feed"
 	"git.j3s.sh/j3s.sh/openlibrary"
+	"git.j3s.sh/j3s.sh/thought"
 	"github.com/SlyMarbo/rss"
 )
-
-//go:embed thought/*
-var thoughtFiles embed.FS
 
 //go:embed templates
 var templateFiles embed.FS
@@ -158,7 +157,7 @@ renderPage(w, r, r.URL.Path, templateData)
 }
 
 func thoughtHandler(w http.ResponseWriter, r *http.Request) {
-	content, err := fs.ReadFile(thoughtFiles, strings.TrimPrefix(r.URL.Path, "/"))
+	post, err := thought.Post(path.Base(r.URL.Path))
 	if err != nil {
 		log.Println(err)
 		if os.IsNotExist(err) {
@@ -169,7 +168,7 @@ }
 		return
 	}
 
-	renderPage(w, r, "thought.html", string(content))
+	renderPage(w, r, "thought.html", post)
 }
 
 func ageHandler(w http.ResponseWriter, r *http.Request) {
M templates/reviews.html → templates/reviews.html
@@ -2,7 +2,7 @@ {{ define "reviews.html" }}
 {{ template "head" . }}
 {{ template "nav" . }}
 <div id="main">
-<p>reviews have moved to https://abyss.j3s.sh
+<p>reviews have moved to <a href="https://abyss.j3s.sh">abyss</a>
 <p>(see the "movies" wiki)
 </div>
 {{ end }}
M templates/thought.html → templates/thought.html
@@ -12,14 +12,14 @@
   <meta charset="utf-8">
   <meta name="viewport" content="width=device-width, initial-scale=1.0">
   <link rel='shortcut icon' href='/static/favicon.ico'>
-  <title>jes post</title>
+  <title>{{ .Data.Title }}</title>
 </head>
 <div id="main"><body class="grayscale">
   <a href="/"><img height="92" src="/static/unnamed-puffy.png" alt="another pufferfish drawn by rekka"></a>
   <a href="/"><img height="92" src="/static/unnamed-puffy-2.png" alt="yet another pufferfish drawn by rekka"></a>
   <a href="/"><img height="92" src="/static/unnamed-puffy-3.png" alt="yet another pufferfish drawn by rekka"></a>
   <hr>
-  <pre>{{ .Data }}</pre>
+  <pre>{{ .Data.Content }}</pre>
 </div></body>
 </html>
 {{end}}
A thought/thought.go
@@ -0,0 +1,131 @@
+package thought
+
+import (
+	"bufio"
+	"embed"
+	"io"
+	"io/fs"
+	"log"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	timeLayoutAtom = "2006-01-02T15:04:05.000Z"
+	timeLayoutPost = "2006-01-02"
+)
+
+//go:embed *.html
+var thoughtFS embed.FS
+
+// a ThoughtPost represents a single "thought post", duh
+type ThoughtPost struct {
+	Link      string
+	Published bool
+	Title     string
+	Updated   string
+	// Content includes the Title + Date
+	// in its "source format", which often means
+	// a bunch of aesthetic spaces and shit
+	Content string
+}
+
+// parse takes a thought name & attempts to parse it
+// from the thoughtFS into a struct.
+// error = no post found
+func parse(file string) (ThoughtPost, error) {
+	var p ThoughtPost
+
+	f, err := thoughtFS.Open(file)
+	if err != nil {
+		return p, err
+	}
+
+	scanner := bufio.NewScanner(f)
+	scanner.Scan()
+	// strip spaces
+	title := scanner.Text()
+	p.Title = strings.TrimSpace(title)
+	scanner.Scan()
+	timestr := strings.TrimSpace(scanner.Text())
+	updated, err := time.Parse(timeLayoutPost, timestr)
+	// if the time parses, we can assume that jes's intent
+	// was to publish the post.
+	if err != nil {
+		p.Published = false
+		p.Updated = "1993-02-18"
+	} else {
+		p.Published = true
+		p.Updated = updated.Format(timeLayoutAtom)
+	}
+	p.Link = "https://j3s.sh/thought/" + file
+	f.Close()
+
+	// i know, reopening a file again to read content
+	// is "very silly" as one would say. ha ha. is jes
+	// a bad programmer? no bitch, this was just the easiest
+	// way to do this, and it reads easily. the filesystem is
+	// in memory anyway, so we're really talking about a micro
+	// optimization. WHAT ELSE WOULD YOU HAVE ME DO?
+	//
+	// y has god forsaken me?
+	f, err = thoughtFS.Open(file)
+	if err != nil {
+		return p, err
+	}
+	defer f.Close()
+
+	b, err := io.ReadAll(f)
+	if err != nil {
+		return p, err
+	}
+
+	p.Content = string(b)
+
+	return p, nil
+}
+
+func Post(name string) (ThoughtPost, error) {
+	post, err := parse(name)
+	if err != nil {
+		return post, err
+	}
+	return post, nil
+}
+
+// GetPublishedPosts gets all posts that have
+// a valid title + date that parse according to
+// my extremely shitty parser
+//
+// (line 1 = title, line 2 = date)
+func Posts() ([]ThoughtPost, error) {
+	var thoughtPosts []ThoughtPost
+
+	files, err := fs.Glob(thoughtFS, "*.html")
+	if err != nil {
+		log.Println(err)
+		return thoughtPosts, err
+	}
+
+	for _, f := range files {
+		thoughtPost, err := parse(f)
+		if err != nil {
+			log.Println(err)
+			return thoughtPosts, err
+		}
+		thoughtPosts = append(thoughtPosts, thoughtPost)
+	}
+
+	return thoughtPosts, err
+}
+
+func DateSort(tp []ThoughtPost) {
+	sort.Slice(tp, func(i, j int) bool {
+		// we assume that timeLayoutAtom is correct here because
+		// it was passed up correctly hopefully
+		ti, _ := time.Parse(timeLayoutAtom, tp[i].Updated)
+		tj, _ := time.Parse(timeLayoutAtom, tp[j].Updated)
+		return ti.After(tj)
+	})
+}
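
For reference, a minimal sketch of how the new thought package's exported API is consumed, mirroring the call sites changed above. The standalone main, the "/thought/" route, and the ":8080" address are illustrative assumptions, not part of this patch:

package main

import (
	"log"
	"net/http"
	"path"

	"git.j3s.sh/j3s.sh/thought"
)

func main() {
	// list every embedded post, newest first, as atom.Handler now does
	posts, err := thought.Posts()
	if err != nil {
		log.Fatal(err)
	}
	thought.DateSort(posts)
	for _, p := range posts {
		log.Printf("%s (%s) %s", p.Title, p.Updated, p.Link)
	}

	// serve a single post by name, as thoughtHandler in main.go now does
	// (":8080" and the route are assumptions for this sketch only)
	http.HandleFunc("/thought/", func(w http.ResponseWriter, r *http.Request) {
		post, err := thought.Post(path.Base(r.URL.Path))
		if err != nil {
			http.NotFound(w, r)
			return
		}
		w.Write([]byte(post.Content))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}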