Skip to content

Commit edecd60

Browse files
author
Enzo DJABALI
committed
Implementing file search, pagination, bulk ops & URL S3 instance routing
1 parent 354a506 commit edecd60

File tree

11 files changed

+1057
-170
lines changed

11 files changed

+1057
-170
lines changed

internal/app/s3manager/bucket_view.go

Lines changed: 155 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -7,39 +7,81 @@ import (
77
"net/http"
88
"path"
99
"regexp"
10+
"sort"
11+
"strconv"
1012
"strings"
1113
"time"
1214

1315
"github.com/minio/minio-go/v7"
1416
)
1517

18+
// objectWithIcon is the view model for a single bucket-listing entry:
// the raw S3 object data plus the presentation fields the bucket
// template renders.
type objectWithIcon struct {
	Key          string    // full object key within the bucket
	Size         int64     // object size in bytes
	LastModified time.Time // last-modified timestamp from the listing
	Owner        string    // object owner as reported by the listing
	Icon         string    // icon identifier used by the template
	IsFolder     bool      // true when the entry represents a folder/prefix
	DisplayName  string    // name shown in the listing UI
}
28+
1629
// HandleBucketView shows the details page of a bucket.
1730
func HandleBucketView(s3 S3, templates fs.FS, allowDelete bool, listRecursive bool, rootURL string) http.HandlerFunc {
18-
type objectWithIcon struct {
19-
Key string
20-
Size int64
21-
LastModified time.Time
22-
Owner string
23-
Icon string
24-
IsFolder bool
25-
DisplayName string
26-
}
27-
2831
type pageData struct {
29-
RootURL string
30-
BucketName string
31-
Objects []objectWithIcon
32-
AllowDelete bool
33-
Paths []string
34-
CurrentPath string
32+
RootURL string
33+
BucketName string
34+
Objects []objectWithIcon
35+
AllowDelete bool
36+
Paths []string
37+
CurrentPath string
38+
SortBy string
39+
SortOrder string
40+
Page int
41+
PerPage int
42+
TotalItems int
43+
TotalPages int
44+
HasPrevPage bool
45+
HasNextPage bool
46+
Search string
3547
}
3648

3749
return func(w http.ResponseWriter, r *http.Request) {
3850
regex := regexp.MustCompile(`\/buckets\/([^\/]*)\/?(.*)`)
39-
matches := regex.FindStringSubmatch(r.RequestURI)
51+
matches := regex.FindStringSubmatch(r.URL.Path)
4052
bucketName := matches[1]
4153
path := matches[2]
4254

55+
// Get sorting parameters from query string
56+
sortBy := r.URL.Query().Get("sortBy")
57+
sortOrder := r.URL.Query().Get("sortOrder")
58+
59+
// Default sorting
60+
if sortBy == "" {
61+
sortBy = "key"
62+
}
63+
if sortOrder == "" {
64+
sortOrder = "asc"
65+
}
66+
67+
// Get pagination parameters
68+
page := 1
69+
if pageStr := r.URL.Query().Get("page"); pageStr != "" {
70+
if p, err := strconv.Atoi(pageStr); err == nil && p > 0 {
71+
page = p
72+
}
73+
}
74+
75+
perPage := 25
76+
if perPageStr := r.URL.Query().Get("perPage"); perPageStr != "" {
77+
if pp, err := strconv.Atoi(perPageStr); err == nil && pp > 0 {
78+
perPage = pp
79+
}
80+
}
81+
82+
// Get search parameter
83+
search := strings.TrimSpace(r.URL.Query().Get("search"))
84+
4385
var objs []objectWithIcon
4486
opts := minio.ListObjectsOptions{
4587
Recursive: listRecursive,
@@ -63,16 +105,87 @@ func HandleBucketView(s3 S3, templates fs.FS, allowDelete bool, listRecursive bo
63105
}
64106
objs = append(objs, obj)
65107
}
108+
109+
// Filter objects based on search query
110+
if search != "" {
111+
searchLower := strings.ToLower(search)
112+
filteredObjs := make([]objectWithIcon, 0)
113+
for _, obj := range objs {
114+
// Search in DisplayName and Key (case-insensitive)
115+
if strings.Contains(strings.ToLower(obj.DisplayName), searchLower) ||
116+
strings.Contains(strings.ToLower(obj.Key), searchLower) {
117+
filteredObjs = append(filteredObjs, obj)
118+
}
119+
}
120+
objs = filteredObjs
121+
}
122+
123+
// Sort objects based on sortBy and sortOrder
124+
sortObjects(objs, sortBy, sortOrder)
125+
126+
// Calculate pagination
127+
totalItems := len(objs)
128+
totalPages := (totalItems + perPage - 1) / perPage
129+
if totalPages == 0 {
130+
totalPages = 1
131+
}
132+
if page > totalPages {
133+
page = totalPages
134+
}
135+
136+
// Paginate objects
137+
start := (page - 1) * perPage
138+
end := start + perPage
139+
if start < 0 {
140+
start = 0
141+
}
142+
if end > totalItems {
143+
end = totalItems
144+
}
145+
if start < totalItems {
146+
objs = objs[start:end]
147+
} else {
148+
objs = []objectWithIcon{}
149+
}
150+
66151
data := pageData{
67152
RootURL: rootURL,
68153
BucketName: bucketName,
69154
Objects: objs,
70155
AllowDelete: allowDelete,
71156
Paths: removeEmptyStrings(strings.Split(path, "/")),
72157
CurrentPath: path,
158+
SortBy: sortBy,
159+
SortOrder: sortOrder,
160+
Page: page,
161+
PerPage: perPage,
162+
TotalItems: totalItems,
163+
TotalPages: totalPages,
164+
HasPrevPage: page > 1,
165+
HasNextPage: page < totalPages,
166+
Search: search,
167+
}
168+
169+
funcMap := template.FuncMap{
170+
"add": func(a, b int) int { return a + b },
171+
"sub": func(a, b int) int { return a - b },
172+
"mul": func(a, b int) int { return a * b },
173+
"min": func(a, b int) int {
174+
if a < b {
175+
return a
176+
}
177+
return b
178+
},
179+
"iterate": func(start, end int) []int {
180+
result := make([]int, 0, end-start)
181+
for i := start; i < end; i++ {
182+
result = append(result, i)
183+
}
184+
return result
185+
},
73186
}
74187

75-
t, err := template.ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl")
188+
t, err := template.New("").Funcs(funcMap).ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl")
76189
if err != nil {
77190
handleHTTPError(w, fmt.Errorf("error parsing template files: %w", err))
78191
return
@@ -114,3 +227,27 @@ func removeEmptyStrings(input []string) []string {
114227
}
115228
return result
116229
}
230+
231+
// sortObjects sorts the objects based on the specified field and order
232+
func sortObjects(objs []objectWithIcon, sortBy, sortOrder string) {
233+
sort.Slice(objs, func(i, j int) bool {
234+
var less bool
235+
switch sortBy {
236+
case "size":
237+
less = objs[i].Size < objs[j].Size
238+
case "owner":
239+
less = strings.ToLower(objs[i].Owner) < strings.ToLower(objs[j].Owner)
240+
case "lastModified":
241+
less = objs[i].LastModified.Before(objs[j].LastModified)
242+
case "key":
243+
fallthrough
244+
default:
245+
less = strings.ToLower(objs[i].DisplayName) < strings.ToLower(objs[j].DisplayName)
246+
}
247+
248+
if sortOrder == "desc" {
249+
return !less
250+
}
251+
return less
252+
})
253+
}
Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
package s3manager
2+
3+
import (
4+
"archive/zip"
5+
"encoding/json"
6+
"fmt"
7+
"io"
8+
"net/http"
9+
"time"
10+
11+
"github.com/gorilla/mux"
12+
"github.com/minio/minio-go/v7"
13+
)
14+
15+
// BulkDeleteRequest is the JSON request body for the bulk-delete
// endpoint: {"keys": ["obj1", "obj2", ...]}.
type BulkDeleteRequest struct {
	Keys []string `json:"keys"`
}

// BulkDownloadRequest is the JSON request body for bulk download,
// mirroring BulkDeleteRequest.
// NOTE(review): the download handler currently reads keys from a form
// value rather than decoding this struct — confirm it is still needed.
type BulkDownloadRequest struct {
	Keys []string `json:"keys"`
}
24+
25+
// HandleBulkDeleteObjects deletes multiple objects from a bucket.
26+
func HandleBulkDeleteObjects(s3 S3) http.HandlerFunc {
27+
return func(w http.ResponseWriter, r *http.Request) {
28+
bucketName := mux.Vars(r)["bucketName"]
29+
30+
var req BulkDeleteRequest
31+
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
32+
handleHTTPError(w, fmt.Errorf("error parsing request: %w", err))
33+
return
34+
}
35+
36+
if len(req.Keys) == 0 {
37+
http.Error(w, "no keys provided", http.StatusBadRequest)
38+
return
39+
}
40+
41+
// Create a channel for objects to delete
42+
objectsCh := make(chan minio.ObjectInfo)
43+
44+
// Send object names to the channel
45+
go func() {
46+
defer close(objectsCh)
47+
for _, key := range req.Keys {
48+
objectsCh <- minio.ObjectInfo{Key: key}
49+
}
50+
}()
51+
52+
// Remove objects
53+
errorCh := s3.RemoveObjects(r.Context(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
54+
55+
// Check for errors
56+
for err := range errorCh {
57+
if err.Err != nil {
58+
handleHTTPError(w, fmt.Errorf("error removing object %s: %w", err.ObjectName, err.Err))
59+
return
60+
}
61+
}
62+
63+
w.Header().Set("Content-Type", "application/json")
64+
w.WriteHeader(http.StatusOK)
65+
w.Write([]byte(`{"success": true}`))
66+
}
67+
}
68+
69+
// HandleBulkDownloadObjects downloads multiple objects as a ZIP archive.
70+
func HandleBulkDownloadObjects(s3 S3) http.HandlerFunc {
71+
return func(w http.ResponseWriter, r *http.Request) {
72+
bucketName := mux.Vars(r)["bucketName"]
73+
74+
// Parse the form to get the keys
75+
if err := r.ParseForm(); err != nil {
76+
handleHTTPError(w, fmt.Errorf("error parsing form: %w", err))
77+
return
78+
}
79+
80+
keysJSON := r.FormValue("keys")
81+
var keys []string
82+
if err := json.Unmarshal([]byte(keysJSON), &keys); err != nil {
83+
handleHTTPError(w, fmt.Errorf("error parsing keys: %w", err))
84+
return
85+
}
86+
87+
if len(keys) == 0 {
88+
http.Error(w, "no keys provided", http.StatusBadRequest)
89+
return
90+
}
91+
92+
// Set headers for ZIP download
93+
timestamp := time.Now().Format("20060102-150405")
94+
zipFilename := fmt.Sprintf("%s-%s.zip", bucketName, timestamp)
95+
w.Header().Set("Content-Type", "application/zip")
96+
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", zipFilename))
97+
98+
// Create a new ZIP writer
99+
zipWriter := zip.NewWriter(w)
100+
defer zipWriter.Close()
101+
102+
// Add each object to the ZIP
103+
for _, key := range keys {
104+
// Get the object from S3
105+
object, err := s3.GetObject(r.Context(), bucketName, key, minio.GetObjectOptions{})
106+
if err != nil {
107+
// Log error but continue with other files
108+
continue
109+
}
110+
111+
// Get object info to check if it's valid
112+
_, err = object.Stat()
113+
if err != nil {
114+
object.Close()
115+
continue
116+
}
117+
118+
// Create a file in the ZIP
119+
zipFile, err := zipWriter.Create(key)
120+
if err != nil {
121+
object.Close()
122+
continue
123+
}
124+
125+
// Copy the object content to the ZIP file
126+
_, err = io.Copy(zipFile, object)
127+
object.Close()
128+
if err != nil {
129+
// Error writing to ZIP, but we can't return HTTP error at this point
130+
continue
131+
}
132+
}
133+
}
134+
}

0 commit comments

Comments
 (0)