Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions badges_app/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Base image pinned for python3.6 availability.
# NOTE(review): ubuntu:18.04 is end-of-life — consider upgrading base image and Python.
FROM ubuntu:18.04

# Python toolchain plus the SVG->PDF rendering stack (inkscape, librsvg, poppler).
RUN apt update && \
    apt install -y wget bash python3.6-venv python3.6-dev python3-pip build-essential inkscape unzip librsvg2-bin poppler-utils

# Force a UTF-8 locale so Python handles non-ASCII attendee names correctly.
ENV LANG=C.UTF-8
ENV LANGUAGE=C.UTF-8
ENV LC_ALL=C.UTF-8

WORKDIR /app

# Install Python dependencies before copying sources to leverage layer caching.
COPY requirements.txt .
# Fix: use pip3 consistently — bare `pip` may resolve to Python 2's pip on 18.04.
RUN pip3 install --upgrade pip && pip3 install -r requirements.txt

COPY . .

# Fetch the Sansation font used by the badge templates and register it with fontconfig.
RUN wget --header 'Host: dl.dafont.com' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-US,en;q=0.5' --referer 'https://www.dafont.com/sansation.font' --header 'Upgrade-Insecure-Requests: 1' 'https://dl.dafont.com/dl/?f=sansation' --output-document 'sansation.zip' && \
    unzip sansation.zip -d /usr/local/share/fonts && \
    fc-cache -f -v

EXPOSE 8000

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
33 changes: 33 additions & 0 deletions badges_app/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Conference Badge Generator WebApp

A web application for generating conference badges from SVG templates and participant CSV data.

## Features

- Upload multiple SVG badge templates
- Upload multiple CSV files with attendee data
- Two output options:
- Separate PDF files for each badge
- Single merged PDF with 4 badges per page (with customizable dimensions)

## 🚀 Quick Start with Docker

1. Navigate to the project directory:

```bash
cd badges_app
```

2. Build the Docker image:

```bash
docker build -t badge-generator .
```

3. Run the container:

```bash
docker run -d -p 8000:8000 --name badge-app badge-generator
```

4. Open the app in your browser: http://localhost:8000
Empty file added badges_app/__init__.py
Empty file.
225 changes: 225 additions & 0 deletions badges_app/analyze_csv_and_svg.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,225 @@
import argparse
from difflib import SequenceMatcher
import base64
import io
from fastapi import UploadFile
import xml.etree.ElementTree as ET
import re
from typing import List
from pdf2image import convert_from_bytes
import os
import shutil
import csv

from generate_badges import create_badges

# Known naming variants for each target field.
# Maps a canonical target field to the lowercase aliases it may appear
# under in uploaded CSV headers; candidates are compared against headers
# that have been run through normalize() (separators stripped, lowercased).
KNOWN_VARIANTS = {
    'First Name': ['firstname', 'fname', 'givenname'],
    'Last Name': ['lastname', 'surname', 'lname', 'familyname'],
    'Company Name': ['companyname', 'company name', 'company', 'organization', 'employer'],
}


def normalize(name: str) -> str:
    """Collapse a column name to a canonical comparison form.

    Removes whitespace, underscores, hyphens and dots, then lowercases,
    so e.g. "First_Name" and "first name" compare equal.
    """
    stripped = re.sub(r'[\s_\-\.]', '', name)
    return stripped.lower()


def best_match(variants, normalized_columns, used_columns):
    """Pick the not-yet-used column most similar to any of *variants*.

    Similarity is difflib's SequenceMatcher ratio; returns the winning
    column key, or None when no pairing reaches the 0.6 threshold.
    """
    top_score = 0
    top_column = None

    for candidate in variants:
        for col in normalized_columns:
            if col in used_columns:
                continue
            ratio = SequenceMatcher(None, candidate, col).ratio()
            if ratio > top_score:
                top_score, top_column = ratio, col

    if top_score < 0.6:
        return None
    return top_column


def find_matching_columns(target, normalized_columns, used_columns, results):
    """Resolve a single target field against the normalized column map.

    On success records {'csv_column': original_name, 'samples': []} under
    results[target], marks the normalized key as used, and returns the
    original column name; returns "" when nothing matches.
    """
    candidates = KNOWN_VARIANTS.get(target, [target])
    hit = best_match(candidates, normalized_columns, used_columns)
    if not hit:
        return ""

    original_name = normalized_columns[hit]
    used_columns.add(hit)
    results[target] = {
        'csv_column': original_name,
        'samples': []
    }
    return original_name


def find_matching_columns_from_list(columns: list, target_fields: list) -> dict:
    """Map each target field to its best-matching column, or None if absent."""
    norm_map = {normalize(col): col for col in columns}
    taken = set()
    matches = {}

    for field in target_fields:
        if not find_matching_columns(field, norm_map, taken, matches):
            matches[field] = None

    return matches


def find_matching_columns_from_csv(csv_file: str, target_fields: list) -> dict:
    """Find best matching columns from a CSV file for the given target fields.

    For each matched column up to 3 non-empty sample values are collected
    from the data rows; unmatched targets map to None.
    """
    with open(csv_file, mode='r', encoding='utf-8') as file:
        reader = csv.reader(file)
        columns = next(reader)

        normalized_columns = {normalize(c): c for c in columns}
        results = {}
        used_columns = set()

        for target in target_fields:
            matched_column = find_matching_columns(target, normalized_columns, used_columns, results)
            if not matched_column:
                results[target] = None  # No match found
                continue

            # Re-read the data rows to collect up to 3 non-empty samples.
            file.seek(0)
            next(reader)  # Skip header again
            # Fix: column index is invariant per target — hoisted out of the
            # row loop (was recomputed via columns.index() for every row).
            idx = columns.index(matched_column)
            samples = results[target]['samples']
            for row in reader:
                if idx >= len(row):
                    continue  # short row lacks this column
                value = row[idx].strip()
                if value:
                    samples.append(value)
                    if len(samples) >= 3:
                        break

    return results


def print_analysis(results: dict):
    """Pretty-print the field-to-column matching results to stdout."""
    print("\nCSV Column Matching Results")
    print("=" * 50)

    for target, data in results.items():
        print(f"\nTarget field: '{target}'")
        if not data:
            print(" No matching column found.")
            continue
        print(f" Matched column: '{data['csv_column']}'")
        samples = data['samples']
        if samples:
            print(f" Sample values: {', '.join(samples)}")
        else:
            print(" (No sample values found)")

    print("\n" + "=" * 50)


def list_of_strings(arg):
    """argparse ``type=`` hook: split a comma-separated value into a list."""
    return arg.split(",")


def svg_to_image(tmp_preview_dir, role, template_filename: str, template_vars) -> str:
    """Render a one-badge preview PDF for *role* and return it as base64 JPEG."""
    preview_csv = os.path.join(tmp_preview_dir, "preview_data.csv")
    create_badges(template_filename, preview_csv, tmp_preview_dir, template_vars)

    # docstamp names its output "<role>_0.pdf"; grab only the first page.
    pdf_path = os.path.join(tmp_preview_dir, f"{role}_0.pdf")
    with open(pdf_path, "rb") as pdf:
        pages = convert_from_bytes(pdf.read(), first_page=1, last_page=1)

    out = io.BytesIO()
    pages[0].save(out, format="JPEG", quality=85)
    return base64.b64encode(out.getvalue()).decode()


def prepare_preview_data(tmp_preview_dir, template_vars):
    """Write a one-row preview CSV mapping template variables to dummy values."""
    dummy_values = {
        "First Name": "Klaus",
        "Last Name": "Templatemann",
        "Company Name": "Badgeify"
    }
    matches = find_matching_columns_from_list(template_vars, dummy_values.keys())

    values_by_column = {}
    for field, info in matches.items():
        if info:
            values_by_column[info["csv_column"]] = dummy_values.get(field, "")

    row = [values_by_column.get(col, "") for col in template_vars]

    csv_path = os.path.join(tmp_preview_dir, "preview_data.csv")
    with open(csv_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        # Fall back to a single "," cell when the template has no variables,
        # so docstamp still receives a parsable header/row pair.
        writer.writerow(template_vars or [","])
        writer.writerow(row or [","])


def analyze_svg_templates(svg_files: List[UploadFile], templates_dir: str):
    """Extract {{placeholder}} variables from uploaded SVG templates.

    Returns (results, previews): results maps filename -> sorted variable
    list (or an error marker for unparsable SVGs); previews maps filename
    -> base64 JPEG preview, or None on parse failure.
    """
    tmp_preview_dir = "tmp_preview"
    os.makedirs(tmp_preview_dir, exist_ok=True)
    placeholder = re.compile(r"\{\{([^}]+)\}\}")
    results = {}
    previews = {}

    try:
        for upload in svg_files:
            template_path = os.path.join(templates_dir, upload.filename)
            with open(template_path, "rb") as handle:
                try:
                    root = ET.fromstring(handle.read())
                    found = set()
                    # Placeholders may appear in element text or attributes.
                    for node in root.iter():
                        if node.text and placeholder.search(node.text):
                            found.update(placeholder.findall(node.text))
                        for value in node.attrib.values():
                            if placeholder.search(value):
                                found.update(placeholder.findall(value))

                    results[upload.filename] = sorted(found)
                    prepare_preview_data(tmp_preview_dir, found)
                    role = upload.filename.split('.')[0]
                    previews[upload.filename] = svg_to_image(tmp_preview_dir, role, template_path, found)
                except ET.ParseError as e:
                    print(f"Error parsing SVG {upload.filename}: {e}")
                    results[upload.filename] = ["Invalid SVG file"]
                    previews[upload.filename] = None

        return results, previews
    finally:
        shutil.rmtree(tmp_preview_dir, ignore_errors=True)


if __name__ == "__main__":
    # CLI entry point: analyze either a CSV file or an explicit column list.
    TARGET_FIELDS = ['First Name', 'Last Name', 'Company Name']

    parser = argparse.ArgumentParser(description="Find matching columns in a CSV file.")
    parser.add_argument("--csv_file", type=str, default=None,
                        help="Path to the CSV file to analyze")
    parser.add_argument("--columns", type=list_of_strings, default=None,
                        help="List of columns to analyze")
    args = parser.parse_args()

    if args.csv_file:
        matches = find_matching_columns_from_csv(args.csv_file, TARGET_FIELDS)
    elif args.columns:
        matches = find_matching_columns_from_list(args.columns, TARGET_FIELDS)
    else:
        matches = {}
    print_analysis(matches)
52 changes: 52 additions & 0 deletions badges_app/generate_badges.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import csv
import json
import os
import shutil
import subprocess


class MissingCSVFieldsException(Exception):
    """Raised when a badge template references fields absent from the CSV.

    The message is HTML-formatted for direct display in the web UI.
    """

    def __init__(self, svg_filename, csv_filename, missing_fields):
        # Fix: render each missing field as a proper <li> item; previously the
        # raw Python list repr was interpolated inside the <ul>, producing
        # malformed HTML like "<ul>['First Name']</ul>" in the UI.
        items = "".join(f"<li>{field}</li>" for field in missing_fields)
        message = f"""
We’re unable to proceed with badge generation.<br><br>
<strong>Details:</strong><br>
The following fields are used in the badge template <code>{os.path.basename(svg_filename)}</code> but they are missing in the uploaded CSV file <code>{os.path.basename(csv_filename)}</code>:<br>
<ul>
{items}
</ul>
<br>
<strong>Please check your CSV file and ensure all required fields are present</strong>
"""
        super().__init__(message)


def check_svg_and_csv_consistency(svg_file, csv_file, svg_analysis):
    """Verify every template field in *svg_analysis* exists as a CSV column.

    Raises MissingCSVFieldsException listing the absent fields.
    """
    with open(csv_file, mode='r', encoding='utf-8') as handle:
        header = next(csv.reader(handle))

    absent = [field for field in svg_analysis if field not in header]
    if absent:
        raise MissingCSVFieldsException(svg_file, csv_file, absent)


def create_badges(template_file, input_file, output_dir, svg_analysis):
    """Render one PDF badge per CSV row with docstamp.

    Raises MissingCSVFieldsException when the CSV lacks template fields,
    and Exception when docstamp reports an unexpected error on stderr.
    """
    check_svg_and_csv_consistency(template_file, input_file, svg_analysis)

    # Fix: pass an argument list with shell=False instead of interpolating
    # file names into a shell string — prevents shell injection and breakage
    # on paths containing spaces or shell metacharacters.
    cmd = [
        'docstamp', 'create',
        '-i', input_file,
        '-t', template_file,
        '-d', 'pdf',
        '-o', output_dir,
        '--index', '',
    ]
    print('Calling {}'.format(' '.join(cmd)))
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # 'Failed to get connection' is a harmless warning (no X/D-Bus in the
    # container); anything else on stderr is treated as fatal.
    if proc.stderr and b'Failed to get connection' not in proc.stderr:
        raise Exception(proc.stderr.decode())


def create_all_badges(roles, templates_dir, data_dir, output_dir, svg_analysis_results):
    """Recreate *output_dir* and render badges for every role.

    *svg_analysis_results* is a JSON string mapping '<role>.svg' to the list
    of template fields extracted for that template.
    """
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Fix: parse the JSON once — it was re-parsed on every loop iteration.
    analysis_by_template = json.loads(svg_analysis_results)
    for role in roles:
        create_badges(
            os.path.join(templates_dir, f"{role}.svg"),
            os.path.join(data_dir, f"{role}.csv"),
            output_dir,
            analysis_by_template[f"{role}.svg"],
        )
Loading