improved cache

parent a0e5864677
commit c78c8f98fb

1 changed file with 67 additions and 22 deletions
text.go | 89
@@ -26,9 +26,20 @@ func HandleTextSearch(w http.ResponseWriter, query, safe, lang string, page int)
	const resultsPerPage = 10

	cacheKey := CacheKey{Query: query, Page: page, Safe: safe, Lang: lang}
	combinedResults := getResultsFromCacheOrFetch(cacheKey, query, safe, lang, page, resultsPerPage)

	hasPrevPage := page > 1
	hasNextPage := len(combinedResults) == resultsPerPage

	displayResults(w, combinedResults, query, lang, time.Since(startTime).Seconds(), page, hasPrevPage, hasNextPage)

	// Always check and cache the next page
	go cacheNextPageIfNotCached(query, safe, lang, page+1, resultsPerPage)
}
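
CacheKey itself is defined outside this diff; the composite literals above imply roughly the following shape, with field types inferred from the surrounding signatures (a sketch, not the repository's actual definition):

	// Sketch only: field names come from the CacheKey literals in this diff,
	// field types from the enclosing function signatures.
	type CacheKey struct {
		Query string
		Page  int
		Safe  string
		Lang  string
	}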

func getResultsFromCacheOrFetch(cacheKey CacheKey, query, safe, lang string, page, resultsPerPage int) []TextSearchResult {
	cacheChan := make(chan []TextSearchResult)
	var combinedResults []TextSearchResult
	var fromCache bool

	go func() {
		results, exists := resultsCache.Get(cacheKey)
@@ -43,30 +54,55 @@ func HandleTextSearch(w http.ResponseWriter, query, safe, lang string, page int)
	select {
	case combinedResults = <-cacheChan:
		if combinedResults != nil {
			fromCache = true
		} else {
			combinedResults = fetchAndCacheResults(query, safe, lang, page, resultsPerPage)
			if combinedResults == nil {
				combinedResults = fetchResultsUntilFull(query, safe, lang, page, resultsPerPage)
				resultsCache.Set(cacheKey, combinedResults)
			}
		}
	case <-time.After(2 * time.Second):
		log.Println("Cache check timeout")
		combinedResults = fetchResultsUntilFull(query, safe, lang, page, resultsPerPage)
		resultsCache.Set(cacheKey, combinedResults)
	}

	// Only pre-fetch and cache results for the next page if we fetched new results
	if !fromCache {
		go func() {
			nextPageResults := fetchAndCacheResults(query, safe, lang, page+1, resultsPerPage)
			resultsCache.Set(CacheKey{Query: query, Page: page + 1, Safe: safe, Lang: lang}, nextPageResults)
		}()
	}

	return combinedResults
}
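
Both goroutines above touch resultsCache concurrently, so its Get and Set must be safe for concurrent use. A minimal sketch of a cache satisfying that contract, assuming a plain map guarded by a sync.RWMutex (the real implementation is outside this diff and may add TTLs or eviction):

	// Hypothetical backing type for resultsCache; Get's two return values
	// and Set's signature match the call sites in this diff.
	type ResultsCache struct {
		mu sync.RWMutex
		m  map[CacheKey][]TextSearchResult
	}

	// Get returns the cached results and whether the key was present.
	func (c *ResultsCache) Get(key CacheKey) ([]TextSearchResult, bool) {
		c.mu.RLock()
		defer c.mu.RUnlock()
		results, ok := c.m[key]
		return results, ok
	}

	// Set stores results under key, overwriting any previous entry.
	func (c *ResultsCache) Set(key CacheKey, results []TextSearchResult) {
		c.mu.Lock()
		defer c.mu.Unlock()
		c.m[key] = results
	}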

func cacheNextPageIfNotCached(query, safe, lang string, page, resultsPerPage int) {
	cacheKey := CacheKey{Query: query, Page: page, Safe: safe, Lang: lang}
	if _, exists := resultsCache.Get(cacheKey); !exists {
		log.Printf("Next page %d not cached, caching now...", page)
		nextPageResults := fetchResultsUntilFull(query, safe, lang, page, resultsPerPage)
		resultsCache.Set(cacheKey, nextPageResults)
	} else {
		log.Printf("Next page %d already cached", page)
	}
}
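
The Get-then-Set above is not atomic: two requests that miss on the same key at the same time will both run fetchResultsUntilFull. If duplicate upstream fetches are a concern, golang.org/x/sync/singleflight can collapse them; a hypothetical variant (the helper name and string key format are illustrative, and fmt plus the singleflight package would need importing):

	var fetchGroup singleflight.Group

	func cacheNextPageOnce(query, safe, lang string, page, resultsPerPage int) {
		cacheKey := CacheKey{Query: query, Page: page, Safe: safe, Lang: lang}
		if _, exists := resultsCache.Get(cacheKey); exists {
			return
		}
		// Concurrent callers that share this string key share one fetch.
		key := fmt.Sprintf("%s|%s|%s|%d", query, safe, lang, page)
		fetchGroup.Do(key, func() (interface{}, error) {
			results := fetchResultsUntilFull(query, safe, lang, page, resultsPerPage)
			resultsCache.Set(cacheKey, results)
			return nil, nil
		})
	}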

func fetchResultsUntilFull(query, safe, lang string, page, resultsPerPage int) []TextSearchResult {
	var combinedResults []TextSearchResult
	currentPage := 1
	resultsNeeded := page * resultsPerPage

	for len(combinedResults) < resultsNeeded {
		results := fetchAndCacheResults(query, safe, lang, currentPage, resultsPerPage)
		if len(results) == 0 {
			break
		}
		combinedResults = append(combinedResults, results...)
		currentPage++
	}

	startIndex := (page - 1) * resultsPerPage
	endIndex := startIndex + resultsPerPage

	if startIndex >= len(combinedResults) {
		return []TextSearchResult{}
	}
	if endIndex > len(combinedResults) {
		endIndex = len(combinedResults)
	}

	return combinedResults[startIndex:endIndex]
}
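
The tail of fetchResultsUntilFull repeats the same clamped slice that paginateResults below performs. Assuming no caller relies on the duplication (paginateResults adds one extra log line), the last four statements could simply delegate:

	// Hypothetical simplification, equivalent to the bounds checks above:
	return paginateResults(combinedResults, page, resultsPerPage)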

func fetchAndCacheResults(query, safe, lang string, page, resultsPerPage int) []TextSearchResult {
@@ -113,24 +149,29 @@ func fetchAndCacheResults(query, safe, lang string, page, resultsPerPage int) []TextSearchResult {
		mu.Unlock()
	}

	// Sort combinedResults by source priority: Google first, DuckDuckGo second, Qwant third
	sort.SliceStable(combinedResults, func(i, j int) bool {
		return sourceOrder(combinedResults[i].Source) < sourceOrder(combinedResults[j].Source)
	})

	log.Printf("Fetched %d results for page %d", len(combinedResults), page)

	return combinedResults
}

func paginateResults(results []TextSearchResult, page, resultsPerPage int) []TextSearchResult {
	startIndex := (page - 1) * resultsPerPage
	endIndex := startIndex + resultsPerPage

	log.Printf("Paginating results: startIndex=%d, endIndex=%d, totalResults=%d", startIndex, endIndex, len(results))

	// Ensure startIndex and endIndex are within bounds
	if startIndex >= len(results) {
		return []TextSearchResult{}
	}
	if endIndex > len(results) {
		endIndex = len(results)
	}

	return results[startIndex:endIndex]
}
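
A worked example of the bounds handling, using hypothetical data (25 results, 10 per page):

	results := make([]TextSearchResult, 25)
	fmt.Println(len(paginateResults(results, 1, 10))) // 10: results[0:10]
	fmt.Println(len(paginateResults(results, 3, 10))) // 5: endIndex clamped from 30 to 25
	fmt.Println(len(paginateResults(results, 4, 10))) // 0: startIndex 30 is past the end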

func sourceOrder(source string) int {
@@ -147,6 +188,10 @@ func sourceOrder(source string) int {
}
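
The body of sourceOrder sits outside the changed hunks, but the sort comment in fetchAndCacheResults pins down its contract: Google first, DuckDuckGo second, Qwant third. A hypothetical body consistent with that comment (the exact source strings are assumptions):

	// Sketch of sourceOrder; the real body is elided from this diff.
	func sourceOrder(source string) int {
		switch source {
		case "Google":
			return 1
		case "DuckDuckGo":
			return 2
		case "Qwant":
			return 3
		default:
			return 4 // unknown sources sort last
		}
	}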

func displayResults(w http.ResponseWriter, results []TextSearchResult, query, lang string, elapsed float64, page int, hasPrevPage, hasNextPage bool) {
	log.Printf("Displaying results for page %d", page)
	log.Printf("Total results: %d", len(results))
	log.Printf("Has previous page: %t, Has next page: %t", hasPrevPage, hasNextPage)

	tmpl, err := template.New("text.html").Funcs(template.FuncMap{
		"sub": func(a, b int) int {
			return a - b
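
The diff is truncated at this point by the page. The sub helper registered above is presumably what text.html uses to compute pager links such as the previous page number; a self-contained sketch of the same mechanism (using text/template and os.Stdout purely for demonstration, since the real template is not shown in this diff):

	t := template.Must(template.New("pager").Funcs(template.FuncMap{
		"sub": func(a, b int) int { return a - b },
	}).Parse(`prev page: {{ sub . 1 }}`))
	_ = t.Execute(os.Stdout, 3) // prints "prev page: 2"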