diff --git a/.github/workflows/netlify-preview.yml b/.github/workflows/netlify-preview.yml new file mode 100644 index 0000000..cc16853 --- /dev/null +++ b/.github/workflows/netlify-preview.yml @@ -0,0 +1,37 @@ +name: PR Preview (Netlify) + +on: + pull_request: + branches: [ main ] + +permissions: + contents: read + pull-requests: write + +concurrency: + group: netlify-preview-${{ github.event.pull_request.number }} + cancel-in-progress: true + +jobs: + deploy-preview: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Deploy to Netlify + uses: nwtgck/actions-netlify@v3.0 + with: + publish-dir: '.' + production-deploy: false + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: "PR #${{ github.event.pull_request.number }} — ${{ github.event.pull_request.title }}" + enable-pull-request-comment: true + enable-commit-comment: false + overwrites-pull-request-comment: true + github-deployment-environment: '' + github-deployment-description: '' + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} + timeout-minutes: 5 diff --git a/README.md b/README.md index be7450c..00e058a 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,78 @@ # Line-Detection -Some components for detecting lines on images, in the mode of TPEN and IIIF/Web Annotation in the browser. + +Browser-only components and a TPEN 3 interface for automatically detecting handwriting lines in manuscript images. All processing runs entirely in the browser using the Canvas 2D API — no server-side backend is required. 
+ +--- + +## Repository Structure + +``` +components/ + line-detection/ + detection.js — Core line-detection algorithms (projection, busyness, combined) + index.js — Custom Element + +interfaces/ + line-detection/ + index.html — TPEN 3 interface page + index.js — Interface logic (auth, page load, detection, save) +``` + +--- + +## Components + +### `components/line-detection/` + +#### `detection.js` + +Exports three async functions (and a resize helper) that operate entirely on a browser ``: + +| Function | Description | +|---|---| +| `resizeImageIfNeeded(image, maxDim)` | Down-scales an image to `maxDim` × `maxDim` preserving aspect ratio | +| `detectLines(imageElement)` | Horizontal projection-profile analysis | +| `detectLinesWithBusyness(imageElement)` | Block-busyness (horizontal colour change) analysis | +| `detectHandwritingLines(imageElement)` | Combined: morphological region detection + busyness per region | + +All functions return `Array<{x, y, width, height}>` in image-pixel coordinates. + +#### `index.js` — `` + +A self-contained Custom Element. Include it in any HTML page: + +```html + + +``` + +The `src` attribute triggers detection. On completion the element fires a `lines-detected` custom event: + +```js +detector.addEventListener('lines-detected', e => { + console.log(e.detail.lines) // Array<{x,y,width,height}> +}) +``` + +--- + +## TPEN 3 Interface — `interfaces/line-detection/` + +A standalone web page that integrates with the [TPEN 3](https://three.t-pen.org) transcription platform. + +### Usage + +Open `interfaces/line-detection/index.html` with the following URL parameters: + +``` +?projectID=&pageID= +``` + +### Workflow + +1. **Authenticate** — the interface redirects to TPEN 3 login if no valid token is found in `localStorage`. +2. **Load page** — the page's canvas is fetched from the TPEN 3 services API. +3. **Detect lines** — clicking _Detect Lines_ runs the browser-side detection and overlays bounding boxes. +4. 
/**
 * Core line-detection algorithms for handwriting images.
 * All operations run entirely in the browser using the Canvas 2D API — no backend required.
 * @module detection
 */

/**
 * Resize an image element or canvas to fit within maxDimension while preserving
 * aspect ratio. Returns the original object untouched if it already fits.
 *
 * @param {HTMLImageElement|HTMLCanvasElement} image
 * @param {number} [maxDimension=2048]
 * @returns {HTMLImageElement|HTMLCanvasElement} the original, or a new down-scaled canvas
 */
export function resizeImageIfNeeded(image, maxDimension = 2048) {
  // NOTE(review): for an <img> carrying a width/height attribute, .width can differ
  // from .naturalWidth; callers here pass freshly loaded images so the two agree.
  const w = image.width ?? image.naturalWidth
  const h = image.height ?? image.naturalHeight
  if (w <= maxDimension && h <= maxDimension) return image

  let newWidth, newHeight
  if (w > h) {
    newWidth = maxDimension
    newHeight = Math.floor(h * (maxDimension / w))
  } else {
    newHeight = maxDimension
    newWidth = Math.floor(w * (maxDimension / h))
  }

  const canvas = document.createElement('canvas')
  canvas.width = newWidth
  canvas.height = newHeight
  canvas.getContext('2d').drawImage(image, 0, 0, newWidth, newHeight)
  return canvas
}

/**
 * Rasterize an image/canvas and return its dimensions plus raw RGBA pixel data.
 * Shared by all detection entry points (previously triplicated inline).
 *
 * @param {HTMLImageElement|HTMLCanvasElement} imageElement
 * @returns {{width:number, height:number, data:Uint8ClampedArray}}
 */
function _rasterize(imageElement) {
  const width = imageElement.width ?? imageElement.naturalWidth
  const height = imageElement.height ?? imageElement.naturalHeight
  const canvas = document.createElement('canvas')
  canvas.width = width
  canvas.height = height
  // willReadFrequently keeps the canvas on the CPU, avoiding a GPU readback
  // penalty on the getImageData call below.
  const ctx = canvas.getContext('2d', { willReadFrequently: true })
  ctx.drawImage(imageElement, 0, 0, width, height)
  const { data } = ctx.getImageData(0, 0, width, height)
  return { width, height, data }
}

/**
 * Find text line bounding boxes using a horizontal projection profile.
 * Dark pixels (grayscale < 225) are counted per row; runs of high-count rows
 * become candidate lines.
 *
 * @param {HTMLImageElement|HTMLCanvasElement} imageElement
 * @returns {Promise<Array<{x:number,y:number,width:number,height:number}>>}
 */
export async function detectLines(imageElement) {
  const { width, height, data } = _rasterize(imageElement)

  // Per-row count of "dark" pixels (ITU-R BT.601 luma, threshold at gray < 225).
  const projection = new Array(height).fill(0)
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      const idx = (y * width + x) * 4
      const gray = Math.round(0.299 * data[idx] + 0.587 * data[idx + 1] + 0.114 * data[idx + 2])
      if ((255 - gray) > 30) projection[y]++
    }
  }

  const smoothed = _smooth(projection, height, 3)
  return _analyzeProjection(smoothed, height, width)
}

/**
 * Find text line bounding boxes by measuring row-level horizontal busyness
 * (colour change between adjacent horizontal blocks).
 *
 * @param {HTMLImageElement|HTMLCanvasElement} imageElement
 * @returns {Promise<Array<{x:number,y:number,width:number,height:number}>>}
 */
export async function detectLinesWithBusyness(imageElement) {
  const { width, height, data } = _rasterize(imageElement)

  const blockSize = Math.max(3, Math.floor(height / 200))
  const busyness = new Array(height).fill(0)

  for (let y = blockSize; y < height - blockSize; y++) {
    for (let x = blockSize; x < width - blockSize; x += blockSize) {
      const cur = (y * width + x) * 4
      const left = (y * width + (x - blockSize)) * 4
      const diff = (Math.abs(data[cur] - data[left]) + Math.abs(data[cur + 1] - data[left + 1]) + Math.abs(data[cur + 2] - data[left + 2]))
      if (diff < 30) continue
      busyness[y] += diff / 3
      // Spread the energy to neighbouring rows with a linear falloff.
      for (let offset = 1; offset <= blockSize / 2; offset++) {
        const w = (diff / 3) * (1 - offset / (blockSize / 2))
        if (y - offset >= 0) busyness[y - offset] += w
        if (y + offset < height) busyness[y + offset] += w
      }
    }
    // NOTE(review): rows receive spill-over contributions from later rows after
    // this normalization runs, so the profile is slightly asymmetric; preserved
    // as-is since the downstream thresholding is tuned to it.
    busyness[y] /= width
  }

  // Renamed from `window` to avoid shadowing the global in browser modules.
  const smoothWindow = Math.max(5, Math.floor(height / 150))
  const smoothed = _smooth(busyness, height, smoothWindow)
  return _analyzeBusynessProfile(smoothed, height, width)
}

/**
 * Combined detection: first locates text regions via morphological analysis,
 * then runs busyness-based detection within each region. Falls back to
 * full-image busyness detection when no regions are found or on any error.
 *
 * @param {HTMLImageElement|HTMLCanvasElement} imageElement
 * @returns {Promise<Array<{x:number,y:number,width:number,height:number}>>}
 */
export async function detectHandwritingLines(imageElement) {
  try {
    const regions = await _detectTextRegions(imageElement)
    if (!regions.length) return detectLinesWithBusyness(imageElement)

    const allLines = []
    for (const region of regions) {
      if (region.width < 50 || region.height < 50) continue // skip specks
      const regionCanvas = document.createElement('canvas')
      regionCanvas.width = region.width
      regionCanvas.height = region.height
      regionCanvas.getContext('2d').drawImage(
        imageElement,
        region.x, region.y, region.width, region.height,
        0, 0, region.width, region.height
      )
      const regionLines = await detectLinesWithBusyness(regionCanvas)
      // Translate region-local boxes back to full-image coordinates.
      for (const line of regionLines) {
        allLines.push({ x: line.x + region.x, y: line.y + region.y, width: line.width, height: line.height })
      }
    }
    return allLines.length ? allLines : detectLinesWithBusyness(imageElement)
  } catch {
    return detectLinesWithBusyness(imageElement)
  }
}

// ── private helpers ────────────────────────────────────────────────────────────

/** Moving-average smoothing of a numeric profile with a symmetric window. */
function _smooth(arr, length, window) {
  return arr.map((_, i) => {
    let sum = 0, count = 0
    for (let j = Math.max(0, i - window); j < Math.min(length, i + window + 1); j++) {
      sum += arr[j]; count++
    }
    return sum / count
  })
}

/** Mean and (population) standard deviation of a numeric array. */
function _stats(arr) {
  const mean = arr.reduce((s, v) => s + v, 0) / arr.length
  const variance = arr.reduce((s, v) => s + (v - mean) ** 2, 0) / arr.length
  return { mean, stdDev: Math.sqrt(variance) }
}

/**
 * Threshold-plus-derivative analysis of a projection profile. Falls back to
 * local-maxima segmentation when at most one line is found.
 */
function _analyzeProjection(proj, height, width) {
  const { mean, stdDev } = _stats(proj)
  const threshold = mean + stdDev * 0.5

  const derivs = []
  for (let i = 1; i < height; i++) derivs.push(proj[i] - proj[i - 1])
  const smoothDerivs = _smooth(derivs, derivs.length, 3)

  const lines = []
  let inLine = false, startY = 0, lineMax = 0

  for (let y = 0; y < height; y++) {
    if (!inLine && (proj[y] > threshold || (y > 0 && smoothDerivs[y - 1] > stdDev * 0.3))) {
      inLine = true; startY = y; lineMax = proj[y]
    } else if (inLine && (proj[y] < threshold * 0.8 || (y > 0 && smoothDerivs[y - 1] < -stdDev * 0.3))) {
      inLine = false
      const h = y - startY
      if (h > 5 && lineMax > threshold * 1.2) lines.push({ x: 0, y: startY, width, height: h })
    }
    if (inLine) lineMax = Math.max(lineMax, proj[y])
  }
  // Close a line still open at the bottom edge.
  if (inLine) {
    const h = height - startY
    if (h > 5 && lineMax > threshold * 1.2) lines.push({ x: 0, y: startY, width, height: h })
  }
  return lines.length <= 1 ? _findLinesWithLocalMaxima(proj, height, width) : lines
}

/**
 * Fallback segmentation: pick well-separated local maxima of the profile and
 * split the image at the midpoints between consecutive peaks.
 */
function _findLinesWithLocalMaxima(proj, height, width) {
  const minDist = Math.round(height * 0.02)
  let peaks = []
  for (let i = 1; i < height - 1; i++) {
    if (proj[i] > proj[i - 1] && proj[i] > proj[i + 1]) peaks.push({ y: i, value: proj[i] })
  }
  peaks.sort((a, b) => b.value - a.value)

  // Greedy non-maximum suppression: keep strongest peaks at least minDist apart.
  const significant = []
  const used = new Set()
  for (const peak of peaks) {
    if ([...used].every(y => Math.abs(peak.y - y) >= minDist)) {
      significant.push(peak)
      used.add(peak.y)
    }
  }
  significant.sort((a, b) => a.y - b.y)

  return significant.map((cur, i) => {
    const startY = i === 0 ? 0 : Math.floor((significant[i - 1].y + cur.y) / 2)
    const endY = i === significant.length - 1 ? height : Math.floor((cur.y + significant[i + 1].y) / 2)
    return endY - startY > 5 ? { x: 0, y: startY, width, height: endY - startY } : null
  }).filter(Boolean)
}

/** Threshold analysis of a busyness profile; falls back to local maxima. */
function _analyzeBusynessProfile(profile, height, width) {
  const { mean, stdDev } = _stats(profile)
  const threshold = mean + stdDev * 0.75

  const lines = []
  let inLine = false, startY = 0, peak = 0

  for (let y = 0; y < height; y++) {
    if (!inLine && profile[y] > threshold) { inLine = true; startY = y; peak = profile[y]; continue }
    if (inLine && profile[y] < threshold * 0.6) {
      inLine = false
      const h = y - startY
      if (h > 4 && peak > threshold * 1.1) lines.push({ x: 0, y: startY, width, height: h })
      continue
    }
    if (inLine) peak = Math.max(peak, profile[y])
  }
  if (inLine) {
    const h = height - startY
    if (h > 4 && peak > threshold * 1.1) lines.push({ x: 0, y: startY, width, height: h })
  }
  return lines.length ? lines : _findLinesWithLocalMaxima(profile, height, width)
}

/**
 * Locate candidate text regions: adaptive binarization on a downsampled grid,
 * dilation + erosion to merge strokes, then connected-component extraction.
 * Region coordinates are returned in full-image pixels.
 */
async function _detectTextRegions(imageElement) {
  const { width, height, data } = _rasterize(imageElement)

  // Downsample factor keeps the working grid at most ~1000 px on a side.
  const ds = Math.max(1, Math.floor(Math.max(width, height) / 1000))
  const dsW = Math.floor(width / ds), dsH = Math.floor(height / ds)

  // Sparse brightness sample drives the adaptive threshold.
  let sum = 0, cnt = 0
  for (let y = 0; y < height; y += ds * 2) {
    for (let x = 0; x < width; x += ds * 2) {
      const idx = (y * width + x) * 4
      sum += (data[idx] + data[idx + 1] + data[idx + 2]) / 3; cnt++
    }
  }
  const thresh = Math.min(40, (sum / cnt) * 0.5)

  const binary = Array.from({ length: dsH }, () => new Array(dsW).fill(0))
  for (let y = 0; y < dsH; y++) {
    for (let x = 0; x < dsW; x++) {
      const idx = (y * ds * width + x * ds) * 4
      binary[y][x] = (data[idx] + data[idx + 1] + data[idx + 2]) / 3 < (255 - thresh) ? 1 : 0
    }
  }

  const dilated = _dilate(binary, 2, dsW, dsH)
  const eroded = _erode(dilated, 1, dsW, dsH)
  const regions = _findConnectedComponents(eroded, dsW, dsH)
  return regions.map(r => ({ x: r.x * ds, y: r.y * ds, width: r.width * ds, height: r.height * ds }))
}

/** Binary morphological dilation with a k×k square structuring element. */
function _dilate(img, k, w, h) {
  const hk = Math.floor(k / 2)
  return img.map((row, y) => row.map((_, x) => {
    for (let ky = -hk; ky <= hk; ky++) {
      for (let kx = -hk; kx <= hk; kx++) {
        const ny = y + ky, nx = x + kx
        if (ny >= 0 && ny < h && nx >= 0 && nx < w && img[ny][nx] === 1) return 1
      }
    }
    return 0
  }))
}

/** Binary morphological erosion with a k×k square structuring element. */
function _erode(img, k, w, h) {
  const hk = Math.floor(k / 2)
  return img.map((row, y) => row.map((_, x) => {
    for (let ky = -hk; ky <= hk; ky++) {
      for (let kx = -hk; kx <= hk; kx++) {
        const ny = y + ky, nx = x + kx
        if (ny >= 0 && ny < h && nx >= 0 && nx < w && img[ny][nx] === 0) return 0
      }
    }
    return 1
  }))
}

/**
 * Two-pass connected-component labelling (4-connectivity) with union-find.
 * Returns at most the 50 largest components with w,h >= 10 and fill density >= 0.08.
 */
function _findConnectedComponents(img, width, height) {
  // MAX caps the union-find table size. Images with very many tiny fragments may
  // silently reuse existing labels once this limit is reached, but in practice the
  // subsequent top-50 region filter discards such noisy components anyway.
  const MAX = 1000
  const parent = Array.from({ length: MAX }, (_, i) => i)
  function find(x) { return parent[x] === x ? x : (parent[x] = find(parent[x])) }
  function union(a, b) { parent[find(a)] = find(b) }

  const label = Array.from({ length: height }, () => new Array(width).fill(0))
  let next = 1
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      if (!img[y][x]) continue
      const neighbors = []
      if (y > 0 && label[y - 1][x]) neighbors.push(label[y - 1][x])
      if (x > 0 && label[y][x - 1]) neighbors.push(label[y][x - 1])
      if (!neighbors.length) { label[y][x] = next < MAX ? next++ : find(1) }
      else { label[y][x] = neighbors[0]; for (let i = 1; i < neighbors.length; i++) union(neighbors[0], neighbors[i]) }
    }
  }
  // Second pass: flatten labels to their union-find roots.
  for (let y = 0; y < height; y++) for (let x = 0; x < width; x++) if (label[y][x]) label[y][x] = find(label[y][x])

  const counts = {}, minX = {}, minY = {}, maxX = {}, maxY = {}
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      const l = label[y][x]; if (!l) continue
      counts[l] = (counts[l] || 0) + 1
      minX[l] = l in minX ? Math.min(minX[l], x) : x
      minY[l] = l in minY ? Math.min(minY[l], y) : y
      maxX[l] = l in maxX ? Math.max(maxX[l], x) : x
      maxY[l] = l in maxY ? Math.max(maxY[l], y) : y
    }
  }
  return Object.keys(counts)
    .map(l => ({ l: +l, count: counts[l] }))
    .sort((a, b) => b.count - a.count)
    .slice(0, 50)
    .map(({ l }) => {
      const w = maxX[l] - minX[l] + 1, h = maxY[l] - minY[l] + 1
      return (w >= 10 && h >= 10 && counts[l] / (w * h) >= 0.08)
        ? { x: minX[l], y: minY[l], width: w, height: h }
        : null
    })
    .filter(Boolean)
}
/**
 * <handwriting-line-detector> — Web Component
 *
 * Loads an image, runs automatic handwriting-line detection entirely in the
 * browser (no backend required) and overlays bounding boxes on the image.
 *
 * Attributes:
 *   src — URL of the image to analyse (required)
 *
 * Events dispatched on the element:
 *   lines-detected — fired when detection completes; detail: { lines }
 *
 * @module handwriting-line-detector
 */

import { resizeImageIfNeeded, detectHandwritingLines } from './detection.js'

class HandwritingLineDetector extends HTMLElement {
  constructor() {
    super()
    this.attachShadow({ mode: 'open' })
    this._lines = []
  }

  static get observedAttributes() {
    return ['src']
  }

  async attributeChangedCallback(name, _old, newValue) {
    if (name !== 'src') return
    if (newValue) {
      await this._processImage(newValue)
    } else {
      // Fix: removing `src` now resets the element (previously a no-op), so
      // hosts such as the TPEN interface's "Clear" button actually clear the
      // overlay and stale line data.
      this._lines = []
      this.shadowRoot.replaceChildren()
    }
  }

  /** @returns {Array} The most recently detected lines */
  get lines() { return this._lines }

  /** Load, (optionally) downscale, detect, rescale coordinates, render, notify. */
  async _processImage(imageUrl) {
    this._showLoading()
    try {
      const image = await this._loadImage(imageUrl)
      const resized = resizeImageIfNeeded(image, 2048)
      const lines = await detectHandwritingLines(resized)

      // Detection ran on the (possibly) downscaled copy; map boxes back to the
      // original image's pixel space.
      if (resized !== image) {
        const sx = (image.naturalWidth || image.width) / resized.width
        const sy = (image.naturalHeight || image.height) / resized.height
        this._lines = lines.map(l => ({ x: l.x * sx, y: l.y * sy, width: l.width * sx, height: l.height * sy }))
      } else {
        this._lines = lines
      }

      this._render(imageUrl, this._lines, image.naturalWidth || image.width, image.naturalHeight || image.height)
      this.dispatchEvent(new CustomEvent('lines-detected', { bubbles: true, detail: { lines: this._lines } }))
    } catch (err) {
      this._showError(err.message)
    }
  }

  /** Load an image cross-origin (IIIF and most public image servers allow this). */
  _loadImage(url) {
    return new Promise((resolve, reject) => {
      const img = new Image()
      img.crossOrigin = 'anonymous'
      img.onload = () => resolve(img)
      img.onerror = () => reject(new Error(`Could not load image: ${url}`))
      img.src = url
    })
  }

  /** Replace shadow content with a "working…" placeholder. */
  _showLoading() {
    const style = document.createElement('style')
    style.textContent = ':host { display: block; } .loading { padding: 20px; text-align: center; font-family: sans-serif; }'
    const div = document.createElement('div')
    div.className = 'loading'
    div.textContent = 'Detecting lines…'
    this.shadowRoot.replaceChildren(style, div)
  }

  /** Replace shadow content with an error message (textContent only — no HTML injection). */
  _showError(msg) {
    const div = document.createElement('div')
    div.textContent = `Error: ${msg}`
    const p = document.createElement('p')
    p.textContent = 'This may be due to CORS restrictions on the image server.'
    const style = document.createElement('style')
    style.textContent = ':host { display: block; } .error { padding: 20px; color: red; font-family: sans-serif; text-align: center; }'
    div.className = 'error'
    div.appendChild(document.createElement('br'))
    div.appendChild(p)
    this.shadowRoot.replaceChildren(style, div)
  }

  /**
   * Render the image plus one absolutely-positioned box per detected line.
   * Box geometry is stored in data-* attributes (image-pixel space) and
   * converted to percentages so the overlay tracks responsive resizing.
   */
  _render(imageUrl, lines, naturalWidth, naturalHeight) {
    const style = document.createElement('style')
    style.textContent = `
      :host { display: block; }
      .container { position: relative; display: inline-block; max-width: 100%; }
      img { display: block; max-width: 100%; width: 100%; height: auto; }
      .box {
        position: absolute;
        border: 2px solid red;
        pointer-events: none;
        box-sizing: border-box;
      }`

    const container = document.createElement('div')
    container.className = 'container'

    const img = document.createElement('img')
    img.src = imageUrl
    img.alt = 'Handwriting image'
    img.crossOrigin = 'anonymous'
    container.appendChild(img)

    lines.forEach(l => {
      const box = document.createElement('div')
      box.className = 'box'
      box.dataset.x = l.x
      box.dataset.y = l.y
      box.dataset.w = l.width
      box.dataset.h = l.height
      container.appendChild(box)
    })

    this.shadowRoot.replaceChildren(style, container)

    const updateBoxes = () => {
      if (!img.naturalHeight) return
      this.shadowRoot.querySelectorAll('.box').forEach(box => {
        const x = +box.dataset.x, y = +box.dataset.y, w = +box.dataset.w, h = +box.dataset.h
        box.style.left = `${(x / naturalWidth) * 100}%`
        box.style.top = `${(y / naturalHeight) * 100}%`
        box.style.width = `${(w / naturalWidth) * 100}%`
        box.style.height = `${(h / naturalHeight) * 100}%`
      })
    }
    img.addEventListener('load', updateBoxes)
    if (img.complete) updateBoxes() // image may already be cached
  }
}

customElements.define('handwriting-line-detector', HandwritingLineDetector)
export default HandwritingLineDetector

Line Detection

+

Browser-only components and a TPEN 3 interface for automatically detecting handwriting lines in manuscript images. All processing runs in the browser using the Canvas 2D API — no server-side backend required.

+ +

Components

+
    +
  • + components/line-detection/ — + <handwriting-line-detector> Custom Element + core detection algorithms + (index.js, + detection.js) +
  • +
+ +

Interfaces

+
    +
  • + Line Detection — + TPEN 3 interface for running detection and saving results as annotations
    + interfaces/line-detection/ + (requires ?projectID=…&pageID=… URL parameters and a TPEN 3 login) +
  • +
+ +

Quick Component Demo

+

Drop the <handwriting-line-detector> element anywhere and point its src at a CORS-enabled IIIF image:

+
<script type="module" src="components/line-detection/index.js"></script>
+<handwriting-line-detector
+  src="https://www.e-codices.ch/loris/ssg%2Fssg-0016%2Fssg-0016_009v.jp2/full/full/0/default.jpg">
+</handwriting-line-detector>
+ +

See the README for full documentation.

+ + diff --git a/interfaces/line-detection/index.html b/interfaces/line-detection/index.html new file mode 100644 index 0000000..4253c2c --- /dev/null +++ b/interfaces/line-detection/index.html @@ -0,0 +1,117 @@ + + + + + + Line Detection — TPEN Interface + + + +
+

Line Detection — TPEN 3 Interface

+
+ +
Initialising…
+ + + +
+ + +
/**
 * Line Detection — TPEN 3 Interface
 *
 * Loads a canvas image from a TPEN 3 project page, runs automatic
 * handwriting-line detection entirely in the browser, and saves the
 * detected lines back to TPEN 3 as Web Annotations.
 *
 * Required URL parameters:
 *   projectID — TPEN 3 project identifier
 *   pageID    — TPEN 3 page (AnnotationPage) identifier
 *
 * Authentication is handled by reading / writing the TPEN3 `userToken`
 * from localStorage (same mechanism used by the main TPEN-interfaces repo).
 */

import '../../components/line-detection/index.js'

// ── Configuration ─────────────────────────────────────────────────────────────

const TPEN3_URL = 'https://three.t-pen.org'
const SERVICES_URL = 'https://dev.api.t-pen.org'

// ── DOM references ────────────────────────────────────────────────────────────

const statusEl = document.getElementById('status')
const controlsEl = document.getElementById('controls')
const detectBtn = document.getElementById('detectBtn')
const saveBtn = document.getElementById('saveBtn')
const clearBtn = document.getElementById('clearBtn')
const loginPrompt = document.getElementById('login-prompt')
const loginLink = document.getElementById('loginLink')
const detectorWrap = document.getElementById('detector-wrapper')
const detector = document.getElementById('detector')

// ── State ─────────────────────────────────────────────────────────────────────

const params = new URLSearchParams(location.search)
const projectID = params.get('projectID')
const pageID = params.get('pageID')

let userToken = null
let canvasID = null               // IIIF Canvas URI
let imageURL = null               // full image URL derived from the canvas
let imageDims = { w: 0, h: 0 }    // natural image dimensions
let canvasDims = { w: 0, h: 0 }   // canvas dimensions (for selector scaling)
let detectedLines = []

// ── Helpers ───────────────────────────────────────────────────────────────────

/** Show a status message; `type` maps to a CSS class ('' | 'error' | 'success'). */
function setStatus(msg, type = '') {
  statusEl.textContent = msg
  statusEl.className = type
}

/** Accept token from URL (first load) or localStorage (subsequent loads). */
function getToken() {
  const fromUrl = params.get('idToken')
  if (fromUrl) {
    localStorage.setItem('userToken', fromUrl)
    // Clean token from URL bar so it isn't bookmarked/shared.
    const clean = new URL(location.href)
    clean.searchParams.delete('idToken')
    history.replaceState(null, '', clean.toString())
    return fromUrl
  }
  return localStorage.getItem('userToken')
}

/**
 * True when the JWT is malformed or its `exp` claim is in the past.
 * Fix: JWT payloads are base64url-encoded; atob only accepts standard base64,
 * so translate the alphabet first (otherwise tokens containing '-' or '_'
 * threw and were treated as expired, forcing a login loop).
 */
function isTokenExpired(token) {
  try {
    const b64 = token.split('.')[1].replace(/-/g, '+').replace(/_/g, '/')
    const payload = JSON.parse(atob(b64))
    return payload.exp * 1000 < Date.now()
  } catch {
    return true
  }
}

/** Point the login link back at this page and reveal the prompt. */
function requireLogin() {
  loginLink.href = `${TPEN3_URL}/login?returnTo=${encodeURIComponent(location.href)}`
  loginPrompt.style.display = 'block'
  setStatus('Please log in to continue.', 'error')
}

// ── TPEN Services helpers ─────────────────────────────────────────────────────

/** Authenticated JSON fetch against TPEN services; throws on non-2xx. */
async function fetchJSON(url, options = {}) {
  const res = await fetch(url, {
    ...options,
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${userToken}`,
      ...(options.headers ?? {})
    }
  })
  if (!res.ok) throw new Error(`${res.status} ${res.statusText} — ${url}`)
  return res.json()
}

/** Extract the trailing ID segment from a TPEN URI or bare ID string. */
function extractId(uri) {
  return (uri ?? '').split('/').pop()
}

/** Load a TPEN 3 page and return the resolved AnnotationPage object. */
async function loadPage(projID, pgID) {
  return fetchJSON(`${SERVICES_URL}/project/${projID}/page/${extractId(pgID)}`)
}

/** Resolve a Canvas URI to a plain object (tries IIIF Presentation API v3/v2). */
async function loadCanvas(uri) {
  return fetch(uri, { headers: { Accept: 'application/json' } }).then(r => r.ok ? r.json() : null)
}

/** Extract the full image URL from a IIIF Canvas object (v3 then v2 shapes). */
function getImageURLFromCanvas(canvas) {
  // Presentation API v3
  const body = canvas?.items?.[0]?.items?.[0]?.body
  if (body?.id) return body.id
  if (body?.['@id']) return body['@id']
  // Presentation API v2
  const res = canvas?.images?.[0]?.resource
  if (res?.['@id']) return res['@id']
  return null
}

/**
 * Build a Web Annotation in IIIF / W3C format from a detected line box.
 * Coordinates are stored relative to the Canvas dimensions (not image pixels),
 * consistent with TPEN 3 conventions.
 *
 * @param {{ x:number, y:number, width:number, height:number }} line — image-space coordinates
 * @returns {object}
 */
function lineToAnnotation(line) {
  // Scale from image-space to canvas-space (identity when dims are unknown).
  const scaleX = imageDims.w ? canvasDims.w / imageDims.w : 1
  const scaleY = imageDims.h ? canvasDims.h / imageDims.h : 1
  const x = Math.round(line.x * scaleX)
  const y = Math.round(line.y * scaleY)
  const w = Math.round(line.width * scaleX)
  const h = Math.round(line.height * scaleY)

  return {
    type: 'Annotation',
    motivation: 'transcribing',
    body: [],
    target: {
      source: canvasID,
      type: 'SpecificResource',
      selector: {
        type: 'FragmentSelector',
        conformsTo: 'http://www.w3.org/TR/media-frags/',
        value: `xywh=pixel:${x},${y},${w},${h}`
      }
    }
  }
}

// ── Detection ─────────────────────────────────────────────────────────────────

/** Handle the lines-detected event from the custom element (registered once at startup). */
function onLinesDetected(e) {
  detectedLines = e.detail.lines
  if (detectedLines.length) {
    saveBtn.disabled = false
    clearBtn.disabled = false
    setStatus(`Detected ${detectedLines.length} line(s). Review them, then click "Save Lines as Annotations".`, 'success')
  } else {
    setStatus('No lines detected. Try a different image or adjust the image quality.', 'error')
  }
  detectBtn.disabled = false
}

/** Kick off detection by (re)assigning the component's src attribute. */
async function runDetection() {
  if (!imageURL) { setStatus('No image loaded.', 'error'); return }
  detectBtn.disabled = true
  saveBtn.disabled = true
  clearBtn.disabled = true
  setStatus('Loading image and detecting lines…')

  // Remove existing src to force re-processing when re-running
  detector.removeAttribute('src')
  detector.setAttribute('src', imageURL)
}

// ── Save ──────────────────────────────────────────────────────────────────────

/** PUT the detected lines back to TPEN 3 as the page's annotation items. */
async function saveAnnotations() {
  if (!detectedLines.length) return
  saveBtn.disabled = true
  setStatus('Saving annotations…')

  const annotations = detectedLines.map(lineToAnnotation)

  try {
    await fetchJSON(
      `${SERVICES_URL}/project/${projectID}/page/${extractId(pageID)}`,
      { method: 'PUT', body: JSON.stringify({ items: annotations }) }
    )
    setStatus(`Saved ${annotations.length} annotation(s) to TPEN 3.`, 'success')
  } catch (err) {
    setStatus(`Save failed: ${err.message}`, 'error')
    saveBtn.disabled = false
  }
}

// ── Initialise ────────────────────────────────────────────────────────────────

async function init() {
  if (!projectID || !pageID) {
    setStatus('Missing required URL parameters: projectID and pageID.', 'error')
    return
  }

  userToken = getToken()
  if (!userToken || isTokenExpired(userToken)) {
    requireLogin()
    return
  }

  setStatus('Loading page from TPEN 3…')
  let page
  try {
    page = await loadPage(projectID, pageID)
  } catch (err) {
    setStatus(`Could not load page: ${err.message}`, 'error')
    return
  }

  const targetCanvas = page.target
  if (!targetCanvas) {
    setStatus('The TPEN 3 page does not reference a canvas.', 'error')
    return
  }
  const canvasURI = typeof targetCanvas === 'string' ? targetCanvas
    : (targetCanvas.id ?? targetCanvas['@id'] ?? targetCanvas.source)
  if (!canvasURI) {
    setStatus('Could not determine canvas URI from the page.', 'error')
    return
  }
  canvasID = canvasURI

  setStatus('Loading canvas…')
  let canvas
  try {
    canvas = await loadCanvas(canvasURI)
  } catch {
    canvas = null
  }

  if (canvas) {
    canvasDims.w = canvas.width ?? canvas['@width'] ?? 0
    canvasDims.h = canvas.height ?? canvas['@height'] ?? 0
    imageURL = getImageURLFromCanvas(canvas)
  }

  if (!imageURL) {
    // If canvas resolution fails (e.g. CORS), treat the canvas URI as the image URL
    imageURL = canvasURI
  }

  // Probe natural image dimensions so we can scale selectors correctly
  await new Promise(resolve => {
    const probe = new Image()
    probe.crossOrigin = 'anonymous'
    probe.onload = () => { imageDims.w = probe.naturalWidth; imageDims.h = probe.naturalHeight; resolve() }
    probe.onerror = () => resolve()
    probe.src = imageURL
  })
  if (!canvasDims.w) canvasDims.w = imageDims.w
  if (!canvasDims.h) canvasDims.h = imageDims.h

  detectorWrap.style.display = 'block'
  controlsEl.style.display = 'flex'
  setStatus('Image ready. Click "Detect Lines" to start automatic line detection.')

  // Wire up the lines-detected event once (handles multiple detect runs)
  detector.addEventListener('lines-detected', onLinesDetected)

  // Wire up controls
  detectBtn.addEventListener('click', runDetection)
  saveBtn.addEventListener('click', saveAnnotations)
  clearBtn.addEventListener('click', () => {
    detector.removeAttribute('src')
    detectedLines = []
    saveBtn.disabled = true
    clearBtn.disabled = true
    setStatus('Detection cleared. Click "Detect Lines" to run again.')
  })
}

init()
+ +[build.environment] + NODE_VERSION = "20" + +# Disable automatic Netlify branch deploys (controlled via GitHub Actions) +[context.branch-deploy] + publish = "." + +# PR deploy previews +[context.deploy-preview] + publish = "."