diff --git a/examples/cat.jpeg b/examples/cat.jpeg
new file mode 100644
index 0000000..2e34d1a
Binary files /dev/null and b/examples/cat.jpeg differ
diff --git a/examples/draw_image.py b/examples/draw_image.py
new file mode 100644
index 0000000..41753b0
--- /dev/null
+++ b/examples/draw_image.py
@@ -0,0 +1,175 @@
+import asyncio
+import logging
+
+import matplotlib.pyplot as plt
+import numpy as np
+from bleak.exc import BleakError
+
+from frame_sdk import Frame
+from frame_sdk.display import PaletteColors, PixelArtConverter
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+
+
+def visualize_palette_conversion(image_path: str, target_width: int = 128, target_height: int = 128):
+    """
+    Visualize the palette conversion of an image using matplotlib.
+
+    Args:
+        image_path (str): Path to the input image
+        target_width (int): Target width for conversion
+        target_height (int): Target height for conversion
+    """
+    palette_data = PixelArtConverter.convert_image_to_palette(
+        image_path,
+        target_width=target_width,
+        target_height=target_height
+    )
+
+    rgb_array = np.zeros((target_height, target_width, 3), dtype=np.uint8)
+
+    for y in range(target_height):
+        for x in range(target_width):
+            color = palette_data[y][x]
+            if isinstance(color, PaletteColors):
+                color_value = color.value
+            else:
+                color_value = int(color)
+            rgb_array[y, x] = PixelArtConverter.PALETTE_RGB[color_value]
+
+    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
+
+    original_img = plt.imread(image_path)
+    ax1.imshow(original_img)
+    ax1.set_title('Original Image')
+    ax1.axis('off')
+
+    ax2.imshow(rgb_array)
+    ax2.set_title('Palette Converted Image')
+    ax2.axis('off')
+
+    palette_reference = np.zeros((16, 1, 3), dtype=np.uint8)
+    for i in range(16):
+        palette_reference[i, 0] = PixelArtConverter.PALETTE_RGB[i]
+
+    ax3 = fig.add_axes([0.92, 0.1, 0.02, 0.8])
+    ax3.imshow(palette_reference)
+    ax3.set_title('Palette')
+    ax3.set_xticks([])
+    ax3.set_yticks(range(16))
+    ax3.set_yticklabels([color.name for color in PaletteColors])
+
+    plt.tight_layout()
+
+    unique_colors = set()
+    color_counts = {}
+    for row in palette_data:
+        for color in row:
+            if isinstance(color, PaletteColors):
+                color_name = color.name
+            else:
+                color_name = PaletteColors(int(color)).name
+            unique_colors.add(color_name)
+            color_counts[color_name] = color_counts.get(color_name, 0) + 1
+
+    print("\nColor Distribution:")
+    total_pixels = target_width * target_height
+    for color_name, count in sorted(color_counts.items(), key=lambda x: x[1], reverse=True):
+        percentage = (count / total_pixels) * 100
+        print(f"{color_name}: {count} pixels ({percentage:.1f}%)")
+
+    return palette_data, rgb_array
+
+
+async def display_image(frame: Frame, x: int, y: int, image_path: str,
+                        width: int = 64, height: int = 64, scale: int = 1):
+    """
+    Load, process and display an image file on the Frame device.
+
+    Args:
+        frame (Frame): Frame device instance
+        x (int): X coordinate to start drawing
+        y (int): Y coordinate to start drawing
+        image_path (str): Path to the image file
+        width (int): Desired width in pixels
+        height (int): Desired height in pixels
+        scale (int): Scale factor for the image
+    """
+    try:
+        logging.info(f"Processing image file: {image_path}")
+        logging.info(f"Displaying image at ({x}, {y}) with scale {scale}")
+
+        palette_data = PixelArtConverter.convert_image_to_palette(image_path, target_width=width, target_height=height)
+        await frame.display.draw_image(x=x, y=y, image_data=palette_data, scale=scale)
+        await frame.display.show()
+
+        logging.info("Image display completed successfully")
+
+    except FileNotFoundError:
+        logging.error(f"Image file not found: {image_path}")
+        raise
+    except BleakError as e:
+        logging.error(f"Bluetooth communication error: {e}")
+        raise
+    except Exception as e:
+        logging.error(f"Error processing or displaying image: {e}")
+        raise
+
+
+async def main():
+    logging.info("Starting main application...")
+    try:
+        async with Frame() as f:
+            logging.info("Frame connection established")
+            await display_image(
+                frame=f,
+                image_path="output.png",
+                x=10,
+                y=10,
+                width=64,
+                height=64,
+                scale=1
+            )
+            logging.info("Image display completed successfully")
+    except Exception as e:
+        logging.error(f"Error in main execution: {e}", exc_info=True)
+    finally:
+        logging.info("Application shutting down")
+
+if __name__ == "__main__":
+    '''
+    # install pyxelate
+    pip install git+https://github.com/sedthh/pyxelate.git
+
+    # create a 2-color quantized image of cat.jpeg
+    pyxelate cat.jpeg output.png --palette 2
+    '''
+
+    # Uncomment this to preview how the image will look on Frame
+    # palette_data, rgb_array = visualize_palette_conversion("output.png", target_width=128, target_height=128)
+    # plt.show(block=False)
+
+    # display output.png on Frame
+    asyncio.run(main())
\ No newline at end of file
diff --git a/examples/output.png b/examples/output.png
new file mode 100644
index 0000000..ccf974c
Binary files /dev/null and b/examples/output.png differ
diff --git a/pyproject.toml b/pyproject.toml
index 99f9e01..2fdcb40 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "frame-sdk"
-version = "1.2.1"
+version = "1.2.3"
 authors = [{ name = "Roger Pincombe", email = "pip@betechie.com" },{ name = "Brilliant Labs", email = "info@brilliant.xyz" }]
 description = "Python Developer SDK for Brilliant Frame glasses"
 readme = "readme.md"
diff --git a/src/frame_sdk/display.py b/src/frame_sdk/display.py
index 3ef5a78..359a0d8 100644
--- a/src/frame_sdk/display.py
+++ b/src/frame_sdk/display.py
@@ -1,10 +1,90 @@
 from __future__ import annotations
 import asyncio
-from typing import Optional, TYPE_CHECKING
+from typing import Optional, TYPE_CHECKING, List
 from enum import Enum
 if TYPE_CHECKING:
     from .frame import Frame
+
+import numpy as np
+import logging
+from PIL import Image
+class PixelArtConverter:
+    PALETTE_RGB = np.array([
+        [0, 0, 0],        # VOID
+        [255, 255, 255],  # WHITE
+        [157, 157, 157],  # GRAY
+        [190, 38, 51],    # RED
+        [224, 111, 139],  # PINK
+        [73, 60, 43],     # DARKBROWN
+        [164, 100, 34],   # BROWN
+        [235, 137, 49],   # ORANGE
+        [247, 226, 107],  # YELLOW
+        [47, 72, 78],     # DARKGREEN
+        [68, 137, 26],    # GREEN
+        [163, 206, 39],   # LIGHTGREEN
+        [27, 38, 50],     # NIGHTBLUE
+        [0, 87, 132],     # SEABLUE
+        [49, 162, 242],   # SKYBLUE
+        [178, 220, 239],  # CLOUDBLUE
+    ])
+
+    @staticmethod
+    def 
_find_unique_colors(img_array: np.ndarray) -> set: + return {tuple(color) for color in np.unique(img_array.reshape(-1, 3), axis=0)} + + @staticmethod + def _find_closest_palette_color(color: tuple) -> int: + """Find the closest palette color and return its index (PaletteColors value)""" + color_array = np.array(color) + distances = np.sqrt(np.sum((PixelArtConverter.PALETTE_RGB - color_array) ** 2, axis=1)) + return int(np.argmin(distances)) + + @classmethod + def convert_image_to_palette(cls, image_path: str, target_width: int = 64, target_height: int = 64) -> List[List[PaletteColors]]: + """ + Convert an image to Frame's PaletteColors format by quantizing to the closest matching colors. + The source image will be automatically quantized from its original color depth + to Frame's fixed 16-color palette: VOID (black), WHITE, GRAY, RED, PINK, DARKBROWN, + BROWN, ORANGE, YELLOW, DARKGREEN, GREEN, LIGHTGREEN, NIGHTBLUE, SEABLUE, + SKYBLUE, and CLOUDBLUE. + + This method performs the following steps: + 1. Loads and resizes the image to the target dimensions + 2. Converts the image to RGB format if needed + 3. Quantizes the image colors to match Frame's 16-color palette using nearest neighbor color matching + 4. Returns a 2D array of PaletteColors enum values + + Args: + image_path (str): Path to the source image file + target_width (int, optional): Desired width of the output image. Defaults to 64. + target_height (int, optional): Desired height of the output image. Defaults to 64. + + Returns: + List[List[PaletteColors]]: 2D array of PaletteColors enum values representing the quantized image. + Each pixel is mapped to the closest matching color in Frame's 16-color palette. + """ + with Image.open(image_path) as img: + if img.mode != 'RGB': + img = img.convert('RGB') + + img = img.resize((target_width, target_height), Image.Resampling.NEAREST) + img_array = np.array(img) + + unique_colors = cls._find_unique_colors(img_array) + + color_mapping = { + color: cls._find_closest_palette_color(color) + for color in unique_colors + } + + palette_data = [[ + PaletteColors(color_mapping[tuple(pixel)]) + for pixel in row + ] for row in img_array] + + return palette_data class Alignment(Enum): """Enum for text alignment options.""" @@ -519,7 +599,7 @@ async def draw_rect(self, x: int, y: int, w: int, h: int, color: PaletteColors = w = w // 8 * 8 await self.frame.run_lua(self._draw_rect_lua(x, y, w, h, color)) - + async def draw_rect_filled(self, x: int, y: int, w: int, h: int, border_width: int, border_color: PaletteColors, fill_color: PaletteColors): """ Draws a filled rectangle with a border on the display. @@ -547,4 +627,110 @@ async def draw_rect_filled(self, x: int, y: int, w: int, h: int, border_width: i lua_to_send = self._draw_rect_lua(x, y, w, h, border_color) # draw the inside rectangle lua_to_send += self._draw_rect_lua(x+border_width, y+border_width, w-border_width*2, h-border_width*2, fill_color) - await self.frame.run_lua(lua_to_send, checked=True) \ No newline at end of file + await self.frame.run_lua(lua_to_send, checked=True) + + async def draw_image(self, *, x: int, y: int, image_data: List[List[PaletteColors]], + scale: int = 1) -> List[List[PaletteColors]]: + """ + Draws a quantized image on the Frame display using bitmap operations. + It supports different color modes (2-color, 4-color, and 16-color) and uses a single + color mode for the entire image based on the maximum color value present. + + Args: + x (int): The x coordinate where the image will be drawn (1-based indexing). 
+        y (int): The y coordinate where the image will be drawn (1-based indexing).
+        image_data (List[List[PaletteColors]]): 2D array of quantized PaletteColors representing the image pixels.
+        scale (int, optional): Scaling factor for the image. Defaults to 1.
+
+        Returns:
+            List[List[PaletteColors]]: The processed image data that was drawn.
+        """
+        if not image_data or not image_data[0]:
+            logging.warning("Empty image data received")
+            return []
+
+        height = len(image_data)
+        width = len(image_data[0])
+        logging.debug(f"Processing image: {width}x{height} at scale {scale}")
+
+        # Ensure coordinates are within valid range
+        lua_x = max(1, min(640 - width * scale, x + 1))
+        lua_y = max(1, min(400 - height * scale, y + 1))
+
+        try:
+            await self.clear()
+            await asyncio.sleep(0.1)  # Small delay after clear
+
+            # Determine color mode for entire image
+            max_color = max(
+                max(color.value if isinstance(color, PaletteColors) else int(color)
+                    for color in row)
+                for row in image_data
+            )
+
+            if max_color <= 1:
+                color_mode = 2
+                pixels_per_byte = 8
+                bit_mask = 0x01
+                bits_per_pixel = 1
+            elif max_color <= 3:
+                color_mode = 4
+                pixels_per_byte = 4
+                bit_mask = 0x03
+                bits_per_pixel = 2
+            else:
+                color_mode = 16
+                pixels_per_byte = 2
+                bit_mask = 0x0F
+                bits_per_pixel = 4
+
+            bytes_needed = (width * scale + pixels_per_byte - 1) // pixels_per_byte
+            commands = []
+
+            # Process each row
+            for row_idx, row in enumerate(image_data):
+                row_values = [color.value if isinstance(color, PaletteColors) else int(color) for color in row]
+
+                # Group pixels by color
+                color_groups = {}
+                for i, color in enumerate(row_values):
+                    if color not in color_groups:
+                        color_groups[color] = []
+                    color_groups[color].append(i)
+
+                # Generate commands for each color in this row
+                for color, positions in color_groups.items():
+                    pattern = bytearray(bytes_needed)
+
+                    # Set bits for this color's pixels, repeating each pixel horizontally by the scale factor
+                    for pos in positions:
+                        for sx in range(scale):
+                            scaled_pos = pos * scale + sx
+                            byte_idx = scaled_pos // pixels_per_byte
+                            bit_pos = (pixels_per_byte - 1 - (scaled_pos % pixels_per_byte)) * bits_per_pixel
+                            pattern[byte_idx] |= bit_mask << bit_pos
+
+                    if not any(pattern):  # Skip empty patterns
+                        continue
+
+                    # Repeat each row vertically to apply the scale factor
+                    for sy in range(scale):
+                        current_y = lua_y + row_idx * scale + sy
+                        if current_y <= 400:  # Check y boundary
+                            pattern_hex = ''.join([f'\\x{b:02x}' for b in pattern])
+                            cmd = f'frame.display.bitmap({lua_x},{current_y},{width * scale},{color_mode},{color},"{pattern_hex}")'
+                            commands.append(cmd)
+
+            # Execute commands in chunks to maintain connection
+            CHUNK_SIZE = 40
+            for i in range(0, len(commands), CHUNK_SIZE):
+                chunk = commands[i:i + CHUNK_SIZE]
+                for cmd in chunk:
+                    await self.frame.run_lua(cmd)
+                    await asyncio.sleep(0.001)
+
+            await self.show()
+            logging.debug("Image drawing completed successfully")
+            return image_data
+
+        except Exception as e:
+            logging.error(f"Error during image drawing: {str(e)}")
+            raise
\ No newline at end of file
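
For reference, a minimal standalone sketch of the row packing that draw_image performs in 2-color mode: 8 pixels per byte, 1 bit per pixel, most significant bit first, with one pattern built per distinct color in a row and passed to frame.display.bitmap as an escaped hex string. The pixel values below are hypothetical.

# Pack one 10-pixel row of palette indices (2-color mode: 1 bit per pixel, MSB first).
row = [0, 1, 1, 0, 0, 0, 1, 0, 1, 1]           # hypothetical PaletteColors values
pixels_per_byte, bits_per_pixel, bit_mask = 8, 1, 0x01

bytes_needed = (len(row) + pixels_per_byte - 1) // pixels_per_byte
pattern = bytearray(bytes_needed)
for pos, value in enumerate(row):
    if value == 1:                             # set bits only for the color being drawn (WHITE here)
        bit_pos = (pixels_per_byte - 1 - (pos % pixels_per_byte)) * bits_per_pixel
        pattern[pos // pixels_per_byte] |= bit_mask << bit_pos

print(''.join(f'\\x{b:02x}' for b in pattern))  # -> \x62\xc0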