-
-
Notifications
You must be signed in to change notification settings - Fork 125
feat(reader): add document mode to the PDF viewer, overhaul PDF viewer, add different modes to CBX viewer #306
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
40123ee
47e68bd
58e1650
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,29 @@ | ||
| package org.booklore.config.security.filter; | ||
|
|
||
| import jakarta.servlet.FilterChain; | ||
| import jakarta.servlet.ServletException; | ||
| import jakarta.servlet.http.HttpServletRequest; | ||
| import jakarta.servlet.http.HttpServletResponse; | ||
| import org.springframework.core.annotation.Order; | ||
| import org.springframework.stereotype.Component; | ||
| import org.springframework.web.filter.OncePerRequestFilter; | ||
|
|
||
| import java.io.IOException; | ||
|
|
||
| /** | ||
| * Enables SharedArrayBuffer by setting Cross-Origin isolation headers. | ||
| * Required by pdfium WASM (Emscripten-compiled with thread support) used by EmbedPDF. | ||
| * Without these headers the WASM module stalls on instantiation. | ||
| */ | ||
| @Component | ||
| @Order(1) | ||
| public class SharedArrayBufferHeaderFilter extends OncePerRequestFilter { | ||
|
|
||
| @Override | ||
| protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) | ||
| throws ServletException, IOException { | ||
| response.setHeader("Cross-Origin-Opener-Policy", "same-origin"); | ||
| response.setHeader("Cross-Origin-Embedder-Policy", "credentialless"); | ||
| filterChain.doFilter(request, response); | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -153,6 +153,21 @@ public void getBookContent( | |
| bookService.streamBookContent(bookId, bookType, request, response); | ||
| } | ||
|
|
||
@Operation(summary = "Replace book content", description = "Overwrite the primary PDF file for a book with the uploaded content. Used by the document viewer to persist annotation changes.")
@ApiResponses({
        @ApiResponse(responseCode = "204", description = "Book content replaced successfully"),
        @ApiResponse(responseCode = "404", description = "Book not found")
})
@PutMapping("/{bookId}/content")
@CheckBookAccess(bookIdParam = "bookId")
public ResponseEntity<Void> replaceBookContent(
        @Parameter(description = "ID of the book") @PathVariable long bookId,
        @Parameter(description = "Optional book type for alternative format") @RequestParam(required = false) String bookType,
        HttpServletRequest request) throws java.io.IOException {
    // The raw request body is streamed straight to the service, which writes it
    // over the stored file. NOTE(review): an empty body would truncate the stored
    // file to zero bytes — consider rejecting empty/absent payloads upstream.
    bookService.replaceBookContent(bookId, bookType, request.getInputStream());
    // 204: the replacement produces no representation to return.
    return ResponseEntity.noContent().build();
}
|
Comment on lines
+156
to
+169
|
||
|
|
||
| @Operation(summary = "Download book", description = "Download the book file. Requires download permission or admin.") | ||
| @ApiResponses({ | ||
| @ApiResponse(responseCode = "200", description = "Book downloaded successfully"), | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,5 +1,6 @@ | ||
| package org.booklore.controller; | ||
|
|
||
| import org.booklore.model.dto.response.CbxPageDimension; | ||
| import org.booklore.model.dto.response.CbxPageInfo; | ||
| import org.booklore.service.reader.CbxReaderService; | ||
| import io.swagger.v3.oas.annotations.Operation; | ||
|
|
@@ -36,4 +37,13 @@ public List<CbxPageInfo> getPageInfo( | |
| @Parameter(description = "Optional book type for alternative format (e.g., PDF, CBX)") @RequestParam(required = false) String bookType) { | ||
| return cbxReaderService.getPageInfo(bookId, bookType); | ||
| } | ||
|
|
||
@Operation(summary = "Get page dimensions for a CBX book", description = "Retrieve width, height, and wide flag for each page in a CBX book.")
@ApiResponse(responseCode = "200", description = "Page dimensions returned successfully")
@GetMapping("/{bookId}/page-dimensions")
public List<CbxPageDimension> getPageDimensions(
        @Parameter(description = "ID of the book") @PathVariable Long bookId,
        @Parameter(description = "Optional book type for alternative format (e.g., PDF, CBX)") @RequestParam(required = false) String bookType) {
    // Thin delegate: all archive reading and dimension extraction happens in CbxReaderService.
    return cbxReaderService.getPageDimensions(bookId, bookType);
}
|
Comment on lines
+41
to
+48
|
||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,17 @@ | ||
| package org.booklore.model.dto.response; | ||
|
|
||
| import lombok.AllArgsConstructor; | ||
| import lombok.Builder; | ||
| import lombok.Data; | ||
| import lombok.NoArgsConstructor; | ||
|
|
||
/**
 * Pixel dimensions of a single page inside a CBX (comic book archive),
 * returned by the page-dimensions endpoint so the reader can compute
 * layout (e.g. detect landscape/double-page spreads) before fetching images.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class CbxPageDimension {
    // 1-based index of the page within the archive's image entries
    private int pageNumber;
    // Image width in pixels; 0 when the dimensions could not be read
    private int width;
    // Image height in pixels; 0 when the dimensions could not be read
    private int height;
    // true when width > height, i.e. a wide (landscape) page
    private boolean wide;
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,5 +2,6 @@ | |
|
|
||
/**
 * Page scroll modes supported by the CBX reader.
 * LONG_STRIP was added alongside PAGINATED and INFINITE in this change.
 */
public enum CbxPageScrollMode {
    PAGINATED,
    INFINITE,
    LONG_STRIP
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -43,6 +43,7 @@ | |
| import java.nio.file.Files; | ||
| import java.nio.file.Path; | ||
| import java.nio.file.Paths; | ||
| import java.nio.file.StandardCopyOption; | ||
| import java.util.*; | ||
| import java.util.stream.Collectors; | ||
|
|
||
|
|
@@ -205,6 +206,7 @@ public BookViewerSettings getBookViewerSetting(long bookId, long bookFileId) { | |
| .bookId(bookId) | ||
| .zoom(pdfPref.getZoom()) | ||
| .spread(pdfPref.getSpread()) | ||
| .isDarkTheme(pdfPref.getIsDarkTheme()) | ||
| .build())); | ||
| newPdfViewerPreferencesRepository.findByBookIdAndUserId(bookId, user.getId()) | ||
| .ifPresent(pdfPref -> settingsBuilder.newPdfSettings(NewPdfViewerPreferences.builder() | ||
|
|
@@ -382,6 +384,25 @@ public void streamBookContent(long bookId, String bookType, HttpServletRequest r | |
| fileStreamingService.streamWithRangeSupport(path, contentType, request, response); | ||
| } | ||
|
|
||
| public void replaceBookContent(long bookId, String bookType, java.io.InputStream content) throws IOException { | ||
| BookEntity bookEntity = bookRepository.findByIdWithBookFiles(bookId) | ||
| .orElseThrow(() -> ApiError.BOOK_NOT_FOUND.createException(bookId)); | ||
|
|
||
| Path filePath; | ||
| if (bookType != null) { | ||
| BookFileType requestedType = BookFileType.valueOf(bookType.toUpperCase()); | ||
| BookFileEntity bookFile = bookEntity.getBookFiles().stream() | ||
| .filter(bf -> bf.getBookType() == requestedType) | ||
| .findFirst() | ||
| .orElseThrow(() -> ApiError.FILE_NOT_FOUND.createException("No file of type " + bookType + " found for book")); | ||
| filePath = bookFile.getFullFilePath(); | ||
| } else { | ||
| filePath = FileUtils.getBookFullPath(bookEntity); | ||
| } | ||
|
|
||
| Files.copy(content, filePath, StandardCopyOption.REPLACE_EXISTING); | ||
| } | ||
|
Comment on lines
+387
to
+404
|
||
|
|
||
| @Transactional | ||
| public ResponseEntity<BookDeletionResponse> deleteBooks(Set<Long> ids) { | ||
| BookLoreUser user = authenticationService.getAuthenticatedUser(); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -9,6 +9,7 @@ | |
| import org.apache.commons.compress.archivers.zip.ZipArchiveEntry; | ||
| import org.apache.pdfbox.io.IOUtils; | ||
| import org.booklore.exception.ApiError; | ||
| import org.booklore.model.dto.response.CbxPageDimension; | ||
| import org.booklore.model.dto.response.CbxPageInfo; | ||
| import org.booklore.model.entity.BookEntity; | ||
| import org.booklore.model.entity.BookFileEntity; | ||
|
|
@@ -19,6 +20,11 @@ | |
| import org.booklore.util.UnrarHelper; | ||
| import org.springframework.stereotype.Service; | ||
|
|
||
| import javax.imageio.ImageIO; | ||
| import javax.imageio.ImageReader; | ||
| import javax.imageio.stream.ImageInputStream; | ||
| import java.io.ByteArrayInputStream; | ||
| import java.io.ByteArrayOutputStream; | ||
| import java.io.FileNotFoundException; | ||
| import java.io.IOException; | ||
| import java.io.InputStream; | ||
|
|
@@ -113,6 +119,63 @@ public List<CbxPageInfo> getPageInfo(Long bookId, String bookType) { | |
| } | ||
| } | ||
|
|
||
| public List<CbxPageDimension> getPageDimensions(Long bookId) { | ||
| return getPageDimensions(bookId, null); | ||
| } | ||
|
|
||
| public List<CbxPageDimension> getPageDimensions(Long bookId, String bookType) { | ||
| Path cbxPath = getBookPath(bookId, bookType); | ||
| try { | ||
| CachedArchiveMetadata metadata = getCachedMetadata(cbxPath); | ||
| List<String> imageEntries = metadata.imageEntries; | ||
| List<CbxPageDimension> dimensions = new ArrayList<>(); | ||
| for (int i = 0; i < imageEntries.size(); i++) { | ||
| String entryName = imageEntries.get(i); | ||
| CbxPageDimension dim = readEntryDimension(cbxPath, entryName, metadata, i + 1); | ||
| dimensions.add(dim); | ||
| } | ||
| return dimensions; | ||
| } catch (IOException e) { | ||
| log.error("Failed to read page dimensions for book {}", bookId, e); | ||
| throw ApiError.FILE_READ_ERROR.createException("Failed to read page dimensions: " + e.getMessage()); | ||
| } | ||
| } | ||
|
Comment on lines
+126
to
+142
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Performance concern: Sequential processing of all pages may cause slow response times. This endpoint reads and decodes every image in the archive sequentially to extract dimensions. For a 500-page comic book:
This could result in multi-second response times for large archives, potentially causing frontend timeouts or poor UX. Consider these optimizations:
List<CbxPageDimension> dimensions = IntStream.range(0, imageEntries.size())
.parallel()
.mapToObj(i -> readEntryDimension(cbxPath, imageEntries.get(i), metadata, i + 1))
.toList();
🤖 Prompt for AI Agents
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. CBX is a streaming reader that processes images 1 by 1, the perf there negligible. |
||
|
|
||
| private CbxPageDimension readEntryDimension(Path cbxPath, String entryName, CachedArchiveMetadata metadata, int pageNumber) { | ||
| try { | ||
| ByteArrayOutputStream baos = new ByteArrayOutputStream(); | ||
| streamEntryFromArchive(cbxPath, entryName, baos, metadata); | ||
| byte[] imageBytes = baos.toByteArray(); | ||
| try (ImageInputStream iis = ImageIO.createImageInputStream(new ByteArrayInputStream(imageBytes))) { | ||
| Iterator<ImageReader> readers = ImageIO.getImageReaders(iis); | ||
| if (readers.hasNext()) { | ||
| ImageReader reader = readers.next(); | ||
| try { | ||
| reader.setInput(iis); | ||
| int width = reader.getWidth(0); | ||
| int height = reader.getHeight(0); | ||
| return CbxPageDimension.builder() | ||
| .pageNumber(pageNumber) | ||
| .width(width) | ||
| .height(height) | ||
| .wide(width > height) | ||
| .build(); | ||
| } finally { | ||
| reader.dispose(); | ||
| } | ||
| } | ||
| } | ||
| } catch (IOException e) { | ||
| log.warn("Failed to read dimensions for page {} (entry: {}): {}", pageNumber, entryName, e.getMessage()); | ||
| } | ||
| return CbxPageDimension.builder() | ||
| .pageNumber(pageNumber) | ||
| .width(0) | ||
| .height(0) | ||
| .wide(false) | ||
| .build(); | ||
| } | ||
|
Comment on lines
+144
to
+177
|
||
|
|
||
| private String extractDisplayName(String entryPath) { | ||
| String fileName = baseName(entryPath); | ||
| int lastDotIndex = fileName.lastIndexOf('.'); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
-- Per-book PDF viewer preference: whether the viewer renders with a dark theme.
-- Column default TRUE means existing and new rows start with the dark theme enabled.
ALTER TABLE pdf_viewer_preference ADD COLUMN is_dark_theme BOOLEAN DEFAULT TRUE;
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,10 @@ | ||
-- Change default PDF page spread from 'odd' to 'off' (single page view)
-- NOTE(review): REPLACE matches the exact serialized form '"pageSpread":"odd"';
-- it will miss variants with different spacing or key quoting — confirm the
-- settings JSON is always written without whitespace before relying on this.
UPDATE user_settings
SET setting_value = REPLACE(setting_value, '"pageSpread":"odd"', '"pageSpread":"off"')
WHERE setting_key = 'PDF_READER_SETTING'
AND setting_value LIKE '%"pageSpread":"odd"%';

-- Update existing per-book PDF viewer preferences
UPDATE pdf_viewer_preference
SET spread = 'off'
WHERE spread = 'odd';
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,4 @@ | ||
# Install dependencies into a node_modules tree (instead of Yarn Plug'n'Play).
nodeLinker: node-modules
# Hardlink files within the project's node_modules to reduce disk usage.
nmMode: hardlinks-local
# Keep the package cache inside the project rather than the user-global cache.
enableGlobalCache: false
cacheFolder: .yarn/cache
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
🧩 Analysis chain
🌐 Web query:
For browser COEP/COOP behavior: What are the practical effects of applyingCross-Origin-Embedder-Policy: credentiallessandCross-Origin-Opener-Policy: same-originto all HTTP responses (including API endpoints), and is route-scoping recommended?💡 Result:
Applying Cross-Origin-Embedder-Policy: credentialless (COEP) and Cross-Origin-Opener-Policy: same-origin (COOP) to all HTTP responses, including API endpoints, enables cross-origin isolation for documents served by those responses, unlocking features like SharedArrayBuffer and high-precision timers. However, COEP applies only to documents (HTML), workers, and their subresource loading contexts—not directly to non-document API responses like JSON APIs. Practical effects: For HTML documents (pages): - Enables cross-origin isolation when combined with COOP: same-origin, allowing access to powerful APIs (e.g., SharedArrayBuffer, precise Performance.now). - COEP: credentialless allows loading cross-origin no-cors subresources (e.g., images, scripts via
, <script>) without CORP headers by omitting credentials (cookies, client certs). Credentialed cross-origin loads require CORS (e.g., via crossorigin="use-credentials" or fetch(mode: 'cors')). - Cross-origin iframes still need CORP or credentialless attribute for embedding. - Protects against side-channel attacks (e.g., Spectre) by restricting embedder capabilities. For API endpoints (JSON/XML/etc.): - No direct behavioral change, as COEP/COOP do not apply to non-document responses. JSON APIs are unaffected. - If an API response is navigated to (rare), or used in a worker/service worker with COEP inherited, subresource fetches from that context follow credentialless rules. - Safe to apply universally, as it only affects documents/workers served by the origin. Security benefits (both headers): - COOP: same-origin isolates the browsing context group, severing window.opener links for cross-origin popups/windows, preventing XS-Leaks and tabnabbing. Same-origin windows share context only if matching COOP. - Together: Ensures process isolation, blocking malicious cross-origin interactions. Potential breakage: - Breaks cross-origin popup communication (e.g., OAuth flows relying on window.opener; use BroadcastChannel/SharedWorker instead). - COEP blocks non-compliant cross-origin subresources unless credentialless or CORS/CORP opted-in. - Recursive: Child iframes/documents must comply with COEP. Route-scoping recommendation: Yes, route-scoping is recommended over applying to all responses. Use COEP/COOP only on HTML pages needing isolation (e.g., for SharedArrayBuffer). Exclude: - API endpoints (unnecessary, no effect). - Legacy pages/popups needing cross-origin opener access. Test with report-only mode (COEP-Report-Only, COOP-Report-Only) first. Universal application is safe but suboptimal—scoping avoids unnecessary enforcement on non-documents and reduces breakage risk.
Citations:
🏁 Script executed:
Repository: grimmory-tools/grimmory
Length of output: 166
🏁 Script executed:
Repository: grimmory-tools/grimmory
Length of output: 1399
Scope cross-origin isolation headers to viewer/PDF routes instead of all responses.
The filter currently applies COOP and COEP headers unconditionally to every HTTP response. While these headers are technically safe on API/WebSocket endpoints (they only affect documents and workers, not JSON responses), applying them globally is not a best practice. Since the headers are required only for the EmbedPDF WASM feature, restrict this filter to routes that actually need SharedArrayBuffer. Additionally, COOP: same-origin can break cross-origin opener communication in OAuth flows and other popup-based integrations.
🔧 Example scoping approach
```java
public class SharedArrayBufferHeaderFilter extends OncePerRequestFilter {

+   @Override
+   protected boolean shouldNotFilter(HttpServletRequest request) {
+       String path = request.getRequestURI();
+       return path.startsWith("/api/")
+               || path.startsWith("/ws/")
+               || path.startsWith("/actuator/");
+   }
+
    @Override
    protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
            throws ServletException, IOException {
        response.setHeader("Cross-Origin-Opener-Policy", "same-origin");
        response.setHeader("Cross-Origin-Embedder-Policy", "credentialless");
        filterChain.doFilter(request, response);
    }
}
```
🤖 Prompt for AI Agents