-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathpages.go
403 lines (361 loc) · 10.6 KB
/
pages.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
"time"
"github.com/spf13/viper"
)
// buildPage loads a given wiki page from disk and returns a page object.
// Used for building the initial cache and re-caching.
func buildPage(filename string) (*Page, error) {
	file, err := os.Open(filename)
	if err != nil {
		log.Printf("%v\n", err.Error())
		return nil, err
	}
	defer func() {
		if err := file.Close(); err != nil {
			log.Printf("%v\n", err.Error())
		}
	}()

	// the modtime is recorded so the cache can later detect
	// when the on-disk file changes
	stat, err := file.Stat()
	if err != nil {
		log.Printf("Couldn't stat %s: %v\n", filename, err.Error())
		// previously execution continued past this point, which
		// dereferenced a nil FileInfo below and panicked
		return nil, err
	}

	var body pagedata
	body, err = ioutil.ReadAll(file)
	if err != nil {
		log.Printf("%v\n", err.Error())
		// a partial read would cache a truncated page; bail instead
		return nil, err
	}

	_, shortname := filepath.Split(filename)

	// get meta info on file from the header comment
	title, desc, author := body.getMeta()
	if title == "" {
		title = shortname
	}
	if desc != "" {
		confVars.mu.RLock()
		desc = confVars.descSep + " " + desc
		confVars.mu.RUnlock()
	}
	if author != "" {
		author = "`by " + author + "`"
	}

	// longtitle is used in the <title> tags of the output html
	confVars.mu.RLock()
	longtitle := title + " " + confVars.titleSep + " " + confVars.wikiName
	confVars.mu.RUnlock()

	// store the raw bytes of the document after parsing
	// from markdown to HTML.
	// keep the unparsed markdown for future use (maybe gopher?)
	bodydata := render(body, longtitle)

	return newPage(filename, shortname, title, author, desc, stat.ModTime(), bodydata, body, false), nil
}
// getMeta scans the page until reaching the following fields in the
// header comment:
//
//	title:
//	author:
//	description:
//
// Each value is returned as "" when absent.
func (body pagedata) getMeta() (string, string, string) {
	// a bit redundant, but scanner is simpler to use
	bytereader := bytes.NewReader(body)
	metafinder := bufio.NewScanner(bytereader)
	var title, desc, author string

	for metafinder.Scan() {
		// split on the first colon only, so values that themselves
		// contain colons (e.g. "description: a: b") stay intact
		splitter := bytes.SplitN(metafinder.Bytes(), []byte(":"), 2)
		if len(splitter) < 2 {
			// no colon on this line: nothing to parse. indexing
			// splitter[1] here used to panic on lines such as a
			// bare "title" with no colon.
			continue
		}

		switch string(bytes.ToLower(bytes.TrimSpace(splitter[0]))) {
		case "title":
			title = string(bytes.TrimSpace(splitter[1]))
		case "description":
			desc = string(bytes.TrimSpace(splitter[1]))
		case "author":
			author = string(bytes.TrimSpace(splitter[1]))
		default:
			continue
		}

		// stop scanning once all three fields are populated
		if title != "" && desc != "" && author != "" {
			break
		}
	}

	return title, desc, author
}
// checkCache checks the index page's cache. Returns true if the
// index needs to be re-cached.
// This method helps satisfy the cacher interface.
func (indexCache *indexCacheBlk) checkCache() bool {
	// if the last tally time is past the
	// interval in the config file, re-cache
	if interval, err := time.ParseDuration(viper.GetString("IndexRefreshInterval")); err == nil {
		indexCache.mu.RLock()
		if time.Since(indexCache.page.LastTally) > interval {
			indexCache.mu.RUnlock()
			return true
		}
		indexCache.mu.RUnlock()
	} else {
		log.Printf("Couldn't parse index refresh interval: %v\n", err.Error())
	}

	// if the stored mod time is different from the file's
	// modtime, re-cache. time.Time values are compared with
	// Equal rather than ==/!= so internal representation
	// differences can't cause spurious mismatches.
	confVars.mu.RLock()
	if stat, err := os.Stat(confVars.assetsDir + "/" + confVars.indexFile); err == nil {
		indexCache.mu.RLock()
		if !stat.ModTime().Equal(indexCache.page.Modtime) {
			indexCache.mu.RUnlock()
			confVars.mu.RUnlock()
			return true
		}
		indexCache.mu.RUnlock()
	} else {
		log.Printf("Couldn't stat index page: %v\n", err.Error())
	}
	confVars.mu.RUnlock()

	// if the last tally time or stored mod time is zero, signal
	// to re-cache the index
	indexCache.mu.RLock()
	if indexCache.page.LastTally.IsZero() || indexCache.page.Modtime.IsZero() {
		indexCache.mu.RUnlock()
		return true
	}
	indexCache.mu.RUnlock()
	return false
}
// cache re-renders the index page and stores the result.
// This method helps satisfy the cacher interface.
func (indexCache *indexCacheBlk) cache() error {
	// build the <title> text from the configured wiki name
	// and description
	confVars.mu.RLock()
	pagetitle := confVars.wikiName + " " + confVars.titleSep + " " + confVars.wikiDesc
	confVars.mu.RUnlock()

	body := render(genIndex(), pagetitle)
	if body == nil {
		return errors.New("indexPage.cache(): getting nil bytes")
	}

	indexCache.mu.Lock()
	indexCache.page.Body = body
	indexCache.mu.Unlock()

	return nil
}
// genIndex generates the front page of the wiki.
func genIndex() []byte {
	confVars.mu.RLock()
	indexpath := confVars.assetsDir + "/" + confVars.indexFile
	confVars.mu.RUnlock()

	stat, err := os.Stat(indexpath)
	if err != nil {
		log.Printf("Couldn't stat index: %v\n", err.Error())
		// previously execution continued here, dereferencing the
		// nil FileInfo below and panicking. Fail the same way as
		// an unreadable index file instead.
		return []byte("Could not open \"" + indexpath + "\"")
	}

	// snapshot the cached raw bytes and modtime so the read lock
	// isn't held while scanning/building below
	indexCache.mu.RLock()
	raw := indexCache.page.Raw
	modtime := indexCache.page.Modtime
	indexCache.mu.RUnlock()

	// re-read the file from disk if its modtime differs from the
	// cached copy's (Equal, not !=, for time.Time comparison)
	if !modtime.Equal(stat.ModTime()) {
		raw, err = ioutil.ReadFile(indexpath)
		if err != nil {
			return []byte("Could not open \"" + indexpath + "\"")
		}
		indexCache.mu.Lock()
		indexCache.page.Raw = raw
		indexCache.mu.Unlock()
	}

	buf := bytes.NewBuffer(make([]byte, 0))

	// scan the file line by line until it finds the anchor
	// comment. replace the anchor comment with a list of
	// wiki pages sorted alphabetically by title.
	builder := bufio.NewScanner(bytes.NewReader(raw))
	builder.Split(bufio.ScanLines)
	for builder.Scan() {
		if bytes.Equal(builder.Bytes(), []byte("<!--pagelist-->")) {
			tallyPages(buf)
		} else {
			// bytes.Buffer.Write never returns a non-nil error;
			// the old code called err.Error() on a nil error
			// when n == 0, which would panic
			if _, err := buf.Write(append(builder.Bytes(), byte('\n'))); err != nil {
				log.Printf("Error writing to buffer: %v\n", err.Error())
			}
		}
	}
	if err := builder.Err(); err != nil {
		log.Printf("Error scanning index page: %v\n", err.Error())
	}

	// the LastTally field lets us know
	// when the index was last generated
	// by this function.
	indexCache.mu.Lock()
	indexCache.page.LastTally = time.Now()
	indexCache.mu.Unlock()

	return buf.Bytes()
}
// tallyPages generates a list of pages for the index.
// Called by genIndex() when the anchor
// comment has been found.
func tallyPages(buf *bytes.Buffer) {
	// snapshot the config fields needed so the lock isn't held
	// across the directory read below
	confVars.mu.RLock()
	pageDir := confVars.pageDir
	reverse := confVars.reverseTally
	confVars.mu.RUnlock()

	// get a list of files in the directory specified
	// in the config file parameter "PageDir"
	files, err := ioutil.ReadDir(pageDir)
	switch {
	case err != nil:
		// error checks below test only err != nil: the old code
		// also treated n == 0 as an error and then called
		// err.Error() on a nil error, which would panic
		if _, err := buf.WriteString("*PageDir can't be read.*\n"); err != nil {
			log.Printf("Error writing to buffer: %v\n", err.Error())
		}
	case len(files) == 0:
		if _, err := buf.WriteString("*No wiki pages! Add some content.*\n"); err != nil {
			log.Printf("Error writing to buffer: %v\n", err.Error())
		}
		// matches the original behavior: the empty-directory case
		// returns before the trailing newline is written
		return
	case reverse:
		for i := len(files) - 1; i >= 0; i-- {
			writeIndexLinks(files[i], buf)
		}
	default:
		for _, f := range files {
			writeIndexLinks(f, buf)
		}
	}

	if err := buf.WriteByte('\n'); err != nil {
		log.Printf("Error writing to buffer: %v\n", err.Error())
	}
}
// Takes in a file and outputs a markdown link to it.
// Called by tallyPages() for each file in the pages
// directory.
func writeIndexLinks(f os.FileInfo, buf *bytes.Buffer) {
var page *Page
var err error
if _, exists := pageCache.pool[f.Name()]; exists {
page, err = pullFromCache(f.Name())
if err != nil {
log.Printf("%v\n", err.Error())
}
} else {
// if it hasn't been cached, cache it.
// usually means the page is new.
confVars.mu.RLock()
newpage := newBarePage(confVars.pageDir+"/"+f.Name(), f.Name())
confVars.mu.RUnlock()
if err := newpage.cache(); err != nil {
log.Printf("While caching page %v during the index generation, caught an error: %v\n", f.Name(), err.Error())
}
page, err = pullFromCache(f.Name())
if err != nil {
log.Printf("%v\n", err.Error())
}
}
// get the URI path from the file name
// and write the formatted link to the
// bytes.Buffer
linkname := bytes.TrimSuffix([]byte(page.Shortname), []byte(".md"))
confVars.mu.RLock()
n, err := buf.WriteString("* [" + page.Title + "](" + confVars.viewPath + string(linkname) + ") " + page.Desc + " " + page.Author + "\n")
confVars.mu.RUnlock()
if err != nil || n == 0 {
log.Printf("Error writing to buffer: %v\n", err.Error())
}
}
// cache builds the page from disk and pushes it into the
// page cache.
// This method helps satisfy the cacher interface.
func (page *Page) cache() error {
	// If buildPage() fails, log and surface the error;
	// otherwise store the freshly built page in the pool.
	newpage, err := buildPage(page.Longname)
	if err != nil {
		log.Printf("Couldn't cache %v: %v", page.Longname, err.Error())
		return err
	}

	pageCache.mu.Lock()
	pageCache.pool[newpage.Shortname] = newpage
	pageCache.mu.Unlock()

	return nil
}
// checkCache compares the recorded modtime of a cached page to the
// modtime of the file on disk. If they're different,
// return `true`, indicating the cache needs
// to be refreshed. Also returns `true` if the
// page.Recache field is set to `true`, or if the page is nil.
// This method helps satisfy the cacher interface.
func (page *Page) checkCache() bool {
	if page == nil {
		return true
	}
	if newpage, err := os.Stat(page.Longname); err == nil {
		// time.Time values are compared with Equal rather than
		// ==/!= so internal representation differences can't
		// cause spurious re-caches
		if !newpage.ModTime().Equal(page.Modtime) || page.Recache {
			return true
		}
	} else {
		log.Println("Can't stat " + page.Longname + ". Using cached copy...")
	}
	return false
}
// genPageCache pulls all available pages into cache when TildeWiki
// first starts, saving their modification time as well to
// detect changes to a page.
func genPageCache() {
	// snapshot the page directory so the config read lock isn't
	// held for the whole build: the old code held it across
	// wg.Wait() while each goroutine re-acquired the same RLock,
	// which can deadlock if a writer queues in between.
	confVars.mu.RLock()
	pageDir := confVars.pageDir
	confVars.mu.RUnlock()

	wikipages, err := ioutil.ReadDir(pageDir)
	if err != nil {
		log.Printf("Initial cache build :: Can't read directory: %s\n", err.Error())
		log.Printf("**NOTICE** TildeWiki's cache may not function correctly until this is resolved.\n")
		log.Printf("\tPlease verify the directory in tildewiki.yml is correct and restart TildeWiki\n")
		return
	}

	// spawn a new goroutine for each entry, to cache
	// everything as quickly as possible
	var wg sync.WaitGroup
	for _, f := range wikipages {
		wg.Add(1)
		go func(f os.FileInfo) {
			// deferred so the WaitGroup is released even if
			// page.cache() panics
			defer wg.Done()
			page := newBarePage(pageDir+"/"+f.Name(), f.Name())
			if err := page.cache(); err != nil {
				log.Printf("While generating initial cache, caught error for %v: %v\n", f.Name(), err.Error())
			}
			log.Printf("Cached page %v\n", page.Shortname)
		}(f)
	}
	wg.Wait()
}
// pingCache is a wrapper that checks the cache of any cacher
// type and, when stale, re-caches the data.
func pingCache(c cacher) {
	if !c.checkCache() {
		return
	}
	if err := c.cache(); err != nil {
		log.Printf("Pinged cache, received error while caching: %v\n", err.Error())
	}
}
// pullFromCache retrieves a page from the cache by filename.
// Kept as its own function: less worrying about mutexes.
func pullFromCache(filename string) (*Page, error) {
	pageCache.mu.RLock()
	page, ok := pageCache.pool[filename]
	pageCache.mu.RUnlock()

	if !ok {
		return nil, fmt.Errorf("error pulling %v from cache", filename)
	}
	return page, nil
}
// triggerRecache marks every cached page for re-caching on the
// next page load by setting its Recache flag.
func triggerRecache() {
	// hold the cache's read lock while iterating: the pool map is
	// written concurrently by (*Page).cache(), so the original
	// unlocked range was a data race. NOTE(review): the Recache
	// field itself is still written without a per-page lock —
	// confirm readers tolerate that.
	pageCache.mu.RLock()
	for _, v := range pageCache.pool {
		v.Recache = true
	}
	pageCache.mu.RUnlock()
}