diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..9ec6afa Binary files /dev/null and b/.DS_Store differ diff --git a/.gitignore b/.gitignore index f8599f4..ff34c73 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,21 @@ *.exe *.ilk *.obj -*.cfg \ No newline at end of file +*.cfg + +# 실험 데이터 +*.h5 +*.hdf5 + +# 파이썬 캐시 파일 +*.pyc +__pycache__/ + +# macOS 시스템 파일 +*.DS_Store + +# 옛날 파일 +*_old.py + +# txt 파일 +*.txt \ No newline at end of file diff --git a/Python/.DS_Store b/Python/.DS_Store new file mode 100644 index 0000000..e81bf6d Binary files /dev/null and b/Python/.DS_Store differ diff --git a/Python/20250912_030534/run_summary.txt b/Python/20250912_030534/run_summary.txt new file mode 100644 index 0000000..7bf5ae9 --- /dev/null +++ b/Python/20250912_030534/run_summary.txt @@ -0,0 +1,6 @@ +ended_at: 2025-09-12T03:24:25.187737 +result: OK +frames_captured: 20000 +frames_processed: 19954 +unique_freqs: 20 +hdf5: data.h5 diff --git a/Python/CLAUDE.md b/Python/CLAUDE.md new file mode 100644 index 0000000..d19b0fd --- /dev/null +++ b/Python/CLAUDE.md @@ -0,0 +1,153 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +This is the Python implementation of Thorlabs Scientific Camera examples. The code demonstrates camera control functionality including software triggering, continuous frame capture, and MP4 video recording using the Thorlabs TSI SDK. + +## Development Guidelines + +**IMPORTANT**: When making any code changes or additions: +- Base all coding decisions on the Thorlabs official examples in this directory +- Refer to the official Thorlabs Camera Python API Reference at `/Users/woojins/Documents/GitHub/Camera_Examples/Python/Thorlabs_Camera_Python_API_Reference.pdf` +- Follow the patterns and conventions established in the existing official examples +- Use only the APIs and methods documented in the official reference + +## CW ODMR Experiment Setup + +**CRITICAL**: For the CW ODMR experiment: +- **SynthHD Configuration**: The Windfreak SynthHD is pre-configured to "single step per trigger" mode +- **No SynthHD Control Needed in Code**: When SDG2082x CH.1 sends a trigger pulse, the SynthHD automatically advances to the next frequency step (50MHz increment) +- **Synchronization**: Each SDG trigger simultaneously: + 1. Triggers camera to capture one frame (20ms exposure) + 2. Triggers SynthHD to advance to next frequency step +- **Frame-Frequency Matching**: Frame N corresponds to frequency = 3.000 + ((N-1) % 20) × 0.050 GHz +- **No Manual SynthHD Programming Required**: The hardware is pre-configured externally + +## Architecture + +The Python examples use the Thorlabs TSI SDK with OpenCV for image processing and display: +- **thorlabs_tsi_sdk.tl_camera**: Core camera control and frame acquisition +- **thorlabs_tsi_sdk.tl_mono_to_color_processor**: Color processing for Bayer sensor cameras +- **opencv-python**: Image display and MP4 video writing +- **numpy**: Array manipulation for image data + +All examples depend on the Thorlabs TSI Camera SDK Python package and native DLLs. + +## Setup and Installation + +### Required Dependencies +```bash +# Install Thorlabs Python SDK (from ThorCam installation) +python -m pip install thorlabs_tsi_camera_python_sdk_package.zip + +# Install additional Python packages +pip install opencv-python numpy + +# If requirements.txt exists in ThorCam SDK examples +pip install -r requirements.txt +``` + +### DLL Setup (Windows) +1. 
Copy Native DLLs from ThorCam installation: + - Source: `\Scientific Camera Interfaces\SDK\Native Toolkit\dlls\Native_64_lib` + - Destination: `./dlls/64_lib/` (relative to Python scripts) +2. The `windows_setup.py` script handles DLL path configuration automatically + +### Running Examples +```bash +# Single frame capture with display +python grab_single_frame.py + +# Continuous frame capture (Ctrl+C to stop) +python grab_frames_polling_continuous.py + +# Record MP4 video (200 frames) +python opencv_mp4_writer_example.py +``` + +## SDK Dependencies + +### Thorlabs Scientific Camera SDK +- **Installation**: Download and install ThorCam software package from Thorlabs +- **Location**: `Program Files\Thorlabs\Scientific Imaging\Scientific Camera Support\` +- **Archive**: Extract `Scientific Camera Interfaces.zip` +- **Python Package**: `SDK\Python Toolkit\thorlabs_tsi_camera_python_sdk_package.zip` +- **Native DLLs**: `SDK\Native Toolkit\dlls\Native_64_lib\` → copy to `./dlls/64_lib/` + +### Python Dependencies +- **thorlabs_tsi_sdk**: Main camera control SDK +- **opencv-python**: Image processing and video writing +- **numpy**: Array operations for image data + +## Available Examples + +### grab_single_frame.py +- Initializes first detected camera +- Sets 10ms exposure time and continuous mode +- Captures single frame with software trigger +- Displays image using OpenCV (grayscale converted to RGB) +- Key pattern: SDK context manager, software triggering, OpenCV display + +### grab_frames_polling_continuous.py +- Continuous frame capture with 10 FPS frame rate control +- Polls for frames in infinite loop (Ctrl+C to exit) +- Real-time OpenCV display with frame counting +- Key pattern: Continuous acquisition, polling, frame rate control + +### opencv_mp4_writer_example.py +- Records 200 frames to `video.mp4` file +- Supports both monochrome and color (Bayer) cameras +- Automatic color processing for Bayer sensors (BGR format) +- Scales bit depth to 8-bit for MP4 compatibility +- Key pattern: Video recording, color processing, proper resource cleanup + +## Key Programming Patterns + +### SDK Context Management +```python +with TLCameraSDK() as sdk: + with sdk.open_camera(camera_id) as camera: + # Camera operations here + pass +# Automatic cleanup handled by context managers +``` + +### Frame Acquisition +```python +camera.arm(2) # Arm camera with 2-frame buffer +camera.issue_software_trigger() +frame = camera.get_pending_frame_or_null() +if frame is not None: + image_data = np.copy(frame.image_buffer) +``` + +### Image Data Handling +- Raw data comes as 1D array, reshape using camera dimensions +- Convert to OpenCV format (BGR for color, grayscale to RGB for display) +- Handle bit depth scaling for 8-bit outputs + +### Windows DLL Loading +- Import and call `windows_setup.configure_path()` before SDK imports +- Handles both 32-bit and 64-bit DLL detection +- Gracefully handles non-Windows platforms + +## Common Camera Parameters + +- `camera.exposure_time_us`: Exposure time in microseconds +- `camera.frames_per_trigger_zero_for_unlimited`: 0 for continuous mode +- `camera.image_poll_timeout_ms`: Frame polling timeout +- `camera.frame_rate_control_value`: Target frame rate +- `camera.is_frame_rate_control_enabled`: Enable/disable frame rate limiting +- `camera.image_width_pixels`, `camera.image_height_pixels`: Image dimensions +- `camera.bit_depth`: Sensor bit depth +- `camera.camera_sensor_type`: SENSOR_TYPE.BAYER for color cameras + +## Testing + +Testing requires physical Thorlabs camera 
hardware: +1. Connect camera via USB +2. Ensure ThorCam software can detect the camera +3. Run Python examples to verify SDK integration +4. Check OpenCV display windows and generated MP4 files \ No newline at end of file diff --git a/Python/CW_ODMR5.py b/Python/CW_ODMR5.py new file mode 100644 index 0000000..30d54a2 --- /dev/null +++ b/Python/CW_ODMR5.py @@ -0,0 +1,939 @@ +import threading +import queue +import time +import numpy as np +import matplotlib.pyplot as plt +import datetime +import glob +import os +try: + import h5py + HAS_H5PY = True +except Exception: + HAS_H5PY = False +from collections import defaultdict +import pyvisa +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK, OPERATION_MODE +from thorlabs_tsi_sdk.tl_camera_enums import TRIGGER_POLARITY + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + + +# 설정 +n_frames = 20000 # 측정할 총 프레임 수 (대용량 측정) +mw_start = 3e9 # 시작 MW 주파수: 3 GHz +mw_step = 5e7 # 주파수 스텝: 50 MHz +mw_steps = 20 # 20 스텝 (3 GHz ~ 3.95 GHz) + + +# ROI 영역 설정 (MANUAL 모드에서만 사용; AUTO일 때는 무시되고 중앙 고정 박스 사용) +roi_y_start, roi_y_end = 400, 801 +roi_x_start, roi_x_end = 550, 1001 + +# ROI 모드: 'AUTO' or 'MANUAL' +# - AUTO : 카메라 프레임 중앙에 고정 박스(ROI_AUTO_SIZE x ROI_AUTO_SIZE) +# - MANUAL: 중앙 정렬 500x500(align)로 강제 +ROI_MODE = "MANUAL" # 500x500 확보용: 런타임에서 중앙 정렬 500x500(align)로 강제 +ROI_AUTO_SIZE = 512 # AUTO 모드에서 사용할 정사각형 ROI 크기(픽셀) — 샘플(≈500–1000px)에 맞춤 +ROI_MIN_SIZE = 64 # 어떤 경우에도 최소 보장 크기(너무 작은 ROI 방지) + +# MANUAL 중앙 강제 ROI 설정용(정확한 500x500을 센터에 맞춰 적용, 정렬 단위 보장) +ROI_TARGET_SIZE = 500 # 원하는 정사각형 ROI 한 변(px) +ROI_ALIGN = 8 # 하드웨어 정렬 단위(대부분 8 또는 16 권장) + +# ----- Contrast baseline 설정 (딥이 ~1% 수준일 때 더 타이트/견고하게) ----- +CONTRAST_Q = 0.95 # 상위 quantile (예: 0.95 → 상위 5%) +CONTRAST_MIN_SAMPLES = 5 # 상위 구간에서 최소 샘플 수 (부족하면 max(y) 사용) + +# 검증된 ROI 경계 (카메라 해상도에 맞게 런타임에 계산) +roi_y0_valid = None +roi_y1_valid = None +roi_x0_valid = None +roi_x1_valid = None + +def _validate_and_set_roi_bounds(img_h, img_w): + global roi_y0_valid, roi_y1_valid, roi_x0_valid, roi_x1_valid + # AUTO 모드: 중앙 고정 박스 + if str(ROI_MODE).upper() == "AUTO": + box = int(max(ROI_MIN_SIZE, min(ROI_AUTO_SIZE, img_h, img_w))) + cy = img_h // 2 + cx = img_w // 2 + y0 = max(0, cy - box // 2) + y1 = min(img_h, y0 + box) + x0 = max(0, cx - box // 2) + x1 = min(img_w, x0 + box) + h = y1 - y0 + w = x1 - x0 + roi_y0_valid, roi_y1_valid = y0, y1 + roi_x0_valid, roi_x1_valid = x0, x1 + print(f"ROI 확정[AUTO]: x=[{x0},{x1}) y=[{y0},{y1}) (size={w}x{h}), img={img_w}x{img_h}") + return (y0, y1, x0, x1) + + # MANUAL 모드: 중앙 정사각형(ROI_TARGET_SIZE, 정렬 보장)으로 강제 적용 + # 1) 타겟 크기를 프레임에 맞춰 제한 + 정렬 단위에 스냅 + size_req = min(int(ROI_TARGET_SIZE), int(img_h), int(img_w)) + size_aligned = max(ROI_MIN_SIZE, (size_req // ROI_ALIGN) * ROI_ALIGN) + size_aligned = min(size_aligned, img_h, img_w) + + # 2) 센터 기준 좌표 계산(프레임 내부에 완전히 들어오도록 클램프) + cy = img_h // 2 + cx = img_w // 2 + y0 = max(0, min(cy - size_aligned // 2, img_h - size_aligned)) + y1 = y0 + size_aligned + x0 = max(0, min(cx - size_aligned // 2, img_w - size_aligned)) + x1 = x0 + size_aligned + h = y1 - y0 + w = x1 - x0 + + print(f"ROI 확정[MANUAL]: x=[{x0},{x1}) y=[{y0},{y1}) (size={w}x{h}), img={img_w}x{img_h}") + + roi_y0_valid, roi_y1_valid = y0, y1 + roi_x0_valid, roi_x1_valid = x0, x1 + return (y0, y1, x0, x1) + +# 런 폴더(날짜_시간)와 주파수별 저장 폴더를 미리 생성 (효율성 향상) +code_dir = os.path.dirname(os.path.abspath(__file__)) +run_stamp = 
datetime.datetime.now().strftime("%Y%m%d_%H%M%S") +run_dir = os.path.join(code_dir, run_stamp) +os.makedirs(run_dir, exist_ok=True) + + +# 런타임 플래그 +PRINT_PER_FRAME = True # 프레임당 1회 로그 출력 (프레임 번호 + 주파수) +SAVE_TXT = False # 주파수 폴더별 .txt 저장 (비권장: 파일 수 많음) +SAVE_HDF5 = True # HDF5에 ROI/메타데이터 스트리밍 저장 + +LIVE_ODMR = True # 스윕 1회마다 라이브 평균 ODMR 플롯 업데이트 + +# 카메라 외부 트리거 엣지 설정: 'rising' 또는 'falling' +CAMERA_TRIGGER_EDGE = "rising" + +# 카메라 프레임 데이터와 intensity 데이터 저장용 자료구조 +# 프레임 큐: 이미지 전체를 저장하지 않고 (frame_count, roi_total_intensity)만 저장하여 메모리 사용 최소화 +frame_queue = queue.Queue(maxsize=512) +plot_queue = queue.Queue(maxsize=16) # 라이브 플롯 업데이트용 (메인 스레드에서만 그림) +intensity_dict = {} # key: MW 주파수 (Hz), value: list of ROI 총 intensity 값 + +# 전역 요약 카운터 (컨슈머가 업데이트, 메인 스레드에서 읽기 전용) +contrast_counts = defaultdict(int) + +# 전역 변수 및 동기화를 위한 변수 +frames_captured = 0 +capture_lock = threading.Lock() +camera_ready = False # 카메라가 ARM되면 True로 설정 +measurement_complete = False +synth_start_event = threading.Event() # 연속 프레임 안정 진입 시 SynthHD(CH2) 시작 신호 +# 비상 중단 신호 (워치독이 트리거) +abort_event = threading.Event() + +# ----------------------------------------- +# SDG2082x 제어 함수 (PyVISA 이용) +# ----------------------------------------- +def sdg_control(): + rm = pyvisa.ResourceManager() + try: + sdg = rm.open_resource("USB0::0xF4EC::0xEE38::SDG2XCAD1R2393::INSTR") + except Exception as e: + print("SDG2082x 연결 실패:", e) + return + + try: + sdg.write("*RST") + time.sleep(0.1) + sdg.write("C1:BSWV WVTP,PULSE") # 펄스 모드 선택 + sdg.write("C1:BSWV FRQ,25") # 25Hz 펄스 → 40ms 주기 + sdg.write("C1:OUTP LOAD,HZ") + sdg.write("C1:BSWV AMP,3.3") # 3.3 Vpp (LVTTL range) + sdg.write("C1:BSWV OFST,1.65") # 0–3.3 V level (centered) + # LVTTL 신호 및 200µs 최소 펄스 폭 권고에 맞춤 + sdg.write("C1:BSWV WIDTH,2e-4") # 200 µs pulse width (>= 100 µs min) + # --- Debug: dump CH1 config --- + try: + cfg1 = sdg.query("C1:BSWV?") + print("[SDG] CH1 BSWV:", cfg1.strip()) + except Exception as e: + print("[SDG] CH1 BSWV? query failed:", e) + except Exception as e: + print("SDG 초기 설정 오류:", e) + return + + # ----------------------- + # CH2: SynthHD 트리거 전용 (기본 HIGH, 짧은 LOW 펄스 = 1 step) + # ----------------------- + try: + sdg.write("C2:BSWV WVTP,PULSE") + sdg.write("C2:BSWV FRQ,25") + sdg.write("C2:OUTP LOAD,HZ") + # Use explicit levels only (avoid AMP/OFST overrides) + sdg.write("C2:BSWV HLEV,3.0") # High level = 3.0 V (baseline) + sdg.write("C2:BSWV LLEV,0.0") # Low level = 0.0 V (pulse) + sdg.write("C2:BSWV WIDTH,2e-4") # 200 µs (LOW 폭으로 사용; step time보다 짧게) + # Ensure ~200 µs pulse at 25 Hz: duty ≈ 0.5% (0.0002 / 0.04) + sdg.write("C2:BSWV DUTY,0.5") # 0.5% duty → ~200 µs pulse width at 25 Hz + # Some firmware may ignore POL, so this is best-effort only + try: + sdg.write("C2:BSWV POL,POS") + except Exception: + pass + sdg.write("C2:OUTP OFF") # 안정 진입 신호 전까지 OFF + # --- Debug: dump CH2 config --- + try: + cfg2 = sdg.query("C2:BSWV?") + print("[SDG] CH2 BSWV:", cfg2.strip()) + except Exception as e: + print("[SDG] CH2 BSWV? query failed:", e) + except Exception as e: + print("SDG CH2 초기 설정 오류:", e) + + # 카메라 준비까지 대기 후 CH1 ON (카메라 트리거) + print("SDG2082x 제어 시작: 카메라 준비 대기 중...") + while not camera_ready: + if abort_event.is_set(): + try: + sdg.write("C1:OUTP OFF"); sdg.write("C2:OUTP OFF") + except Exception: + pass + sdg.close() + print("ABORT 수신: SDG 출력 OFF 후 종료") + return + time.sleep(0.01) + print("카메라 준비 신호 수신. 
ARM 안정화 0.5 s 대기...") + time.sleep(1.5) # ARM 직후 안정화 대기 + print("SDG2082x 펄스 출력 시작 (CH1→Camera)") + sdg.write("C1:OUTP ON") # 카메라용 트리거 시작 + + # 컨슈머가 연속 프레임 안정 진입을 알리면 CH2를 켜 SynthHD 스텝 시작 + def enable_ch2_once(): + if not synth_start_event.wait(timeout=60): # 60s 내 안정 진입 신호 없으면 건너뜀 + return + try: + print("STABLE 신호 수신: CH2(SynthHD) 트리거 시작") + sdg.write("C2:OUTP ON") + except Exception as e: + print("CH2 출력 ON 실패:", e) + + ch2_once = threading.Thread(target=enable_ch2_once, daemon=True) + ch2_once.start() + + global frames_captured + while True: + if abort_event.is_set(): + try: + sdg.write("C1:OUTP OFF"); sdg.write("C2:OUTP OFF") + except Exception: + pass + sdg.close() + print("ABORT 수신: SDG 출력 OFF 후 종료") + return + with capture_lock: + if frames_captured >= n_frames: + break + time.sleep(0.01) + # 출력 비활성화 후 종료 + try: + sdg.write("C1:OUTP OFF") + except Exception: + pass + try: + sdg.write("C2:OUTP OFF") + except Exception: + pass + sdg.close() + print("SDG 제어 종료: 20000 프레임 수집 후 SDG 꺼짐") + +# ----------------------------------------- +# 카메라 Producer 함수 +# ----------------------------------------- +def camera_producer(): + global frames_captured, camera_ready + with TLCameraSDK() as sdk: + available_cameras = sdk.discover_available_cameras() + if len(available_cameras) < 1: + print("카메라가 감지되지 않았습니다.") + return + with sdk.open_camera(available_cameras[0]) as camera: + # 하드웨어 트리거: 엣지(상승/하강) 설정 (SDK Enum 사용) + try: + camera.operation_mode = OPERATION_MODE.HARDWARE_TRIGGERED + camera.frames_per_trigger_zero_for_unlimited = 1 + if CAMERA_TRIGGER_EDGE.lower() == "rising": + camera.trigger_polarity = TRIGGER_POLARITY.ACTIVE_HIGH # Low→High (Rising) + print("카메라 트리거 극성: ACTIVE_HIGH (Rising, Low→High)") + else: + camera.trigger_polarity = TRIGGER_POLARITY.ACTIVE_LOW # High→Low (Falling) + print("카메라 트리거 극성: ACTIVE_LOW (Falling, High→Low)") + except Exception as e: + print(f"트리거 모드/극성 설정 실패: {e}") + + camera.exposure_time_us = 20000 # 20 ms 노출 (단위: 마이크로초) + camera.frames_per_trigger_zero_for_unlimited = 1 + camera.image_poll_timeout_ms = 1000 + # 하드웨어 트리거 사용 시 내부 프레임레이트 제어는 비활성화 + camera.is_frame_rate_control_enabled = False + + # Pre-emptively disable binning before ROI to avoid scaled image sizes + try: + camera.bin_x = 1 + camera.bin_y = 1 + except Exception: + pass + + # --- Full-frame reset before applying ROI (prevents tiny 80x48 frames) --- + try: + # Try common SDK attributes for full sensor size + full_h = int(getattr(camera, "sensor_height_pixels", 0) or getattr(camera, "maximum_image_height_pixels", 0) or 0) + full_w = int(getattr(camera, "sensor_width_pixels", 0) or getattr(camera, "maximum_image_width_pixels", 0) or 0) + if full_h <= 0 or full_w <= 0: + # Fallback: if roi_range is available, use its max dimensions + rr = getattr(camera, "roi_range", None) + if rr and hasattr(rr, "maximum_width") and hasattr(rr, "maximum_height"): + full_w = int(rr.maximum_width) + full_h = int(rr.maximum_height) + if full_h > 0 and full_w > 0: + # Turn off binning first to avoid scaled limits + try: + camera.bin_x = 1 + camera.bin_y = 1 + except Exception: + pass + for setter in ( + lambda: setattr(camera, "roi", (0, 0, full_w, full_h)), + lambda: camera.set_roi(0, 0, full_w, full_h), + ): + try: + setter(); break + except Exception: + pass + print(f"Full-frame reset: {full_w}x{full_h} 요청") + except Exception as e: + print(f"Full-frame reset 실패(무시): {e}") + + # ---- 하드웨어 ROI/비닝 설정 (장치 지원 시) ---- + try: + img_h = int(getattr(camera, "image_height_pixels", 0) or 0) + img_w = int(getattr(camera, 
"image_width_pixels", 0) or 0) + if img_h < 200 or img_w < 200: + # Suspect prior sub-sampling; re-read full limits + fh = int(getattr(camera, "sensor_height_pixels", 0) or getattr(camera, "maximum_image_height_pixels", 0) or 0) + fw = int(getattr(camera, "sensor_width_pixels", 0) or getattr(camera, "maximum_image_width_pixels", 0) or 0) + if fh > 0 and fw > 0: + img_h, img_w = fh, fw + if img_h <= 0 or img_w <= 0: + raise RuntimeError("카메라 해상도 조회 실패") + + applied = False + if str(ROI_MODE).upper() == "AUTO": + # 중앙 정사각형 하드웨어 ROI 적용 + box = int(max(ROI_MIN_SIZE, min(ROI_AUTO_SIZE, img_h, img_w))) + cy = img_h // 2 + cx = img_w // 2 + x = max(0, cx - box // 2) + y = max(0, cy - box // 2) + w = min(box, img_w - x) + h = min(box, img_h - y) + # 일부 카메라는 정렬 제약(예: 8/16 align)이 있어 두 번 시도 + for setter in ( + lambda: setattr(camera, "roi", (x, y, w, h)), + lambda: camera.set_roi(x, y, w, h), + ): + try: + setter(); applied = True; break + except Exception: + pass + if applied: + print(f"HW ROI(AUTO) 적용: x={x}, y={y}, w={w}, h={h}") + else: + print("HW ROI(AUTO) 적용 실패: 장치에서 ROI 속성을 지원하지 않음 (소프트웨어 크롭으로 대체).") + else: + # MANUAL: 중앙 500x500(정렬 보장)으로 하드웨어 ROI 적용 + size_req = min(int(ROI_TARGET_SIZE), int(img_h), int(img_w)) + size_aligned = max(ROI_MIN_SIZE, (size_req // ROI_ALIGN) * ROI_ALIGN) + size_aligned = min(size_aligned, img_h, img_w) + cx, cy = img_w // 2, img_h // 2 + req_x = max(0, min(cx - size_aligned // 2, img_w - size_aligned)) + req_y = max(0, min(cy - size_aligned // 2, img_h - size_aligned)) + req_w = size_aligned + req_h = size_aligned + for setter in ( + lambda: setattr(camera, "roi", (req_x, req_y, req_w, req_h)), + lambda: camera.set_roi(req_x, req_y, req_w, req_h), + ): + try: + setter(); applied = True; break + except Exception: + pass + if applied: + print(f"HW ROI(MANUAL) 적용: x={req_x}, y={req_y}, w={req_w}, h={req_h}") + if max(req_w, req_h) >= 480: + print("NOTE: ROI>=~500px → 25 Hz(40 ms) 트리거 주기 권장 (리드아웃 여유 확보)") + else: + print("HW ROI(MANUAL) 적용 실패: 장치에서 ROI 속성을 지원하지 않음 (소프트웨어 크롭으로 대체).") + except Exception as e: + print(f"HW ROI 설정 중 예외: {e}") + + # 비닝 해제(1x1) — 500x500 해상도 확보를 위해 강제 + try: + binned_off = False + for setter in ( + lambda: setattr(camera, "bin_x", 1), + lambda: setattr(camera, "bin_y", 1), + lambda: setattr(camera, "binning", 1), + lambda: camera.set_binx(1), + lambda: camera.set_biny(1), + lambda: camera.set_binning(1), + ): + try: + setter(); binned_off = True + except Exception: + pass + if binned_off: + print("HW 비닝 설정: 1x1 (무비닝)") + else: + print("HW 비닝 변경 불가: 장치가 해당 속성을 노출하지 않음.") + except Exception as e: + print(f"HW 비닝 설정 중 예외: {e}") + # ---------------------------------------------- + + # 카메라가 보고하는 실제 해상도 기준으로 ROI 경계 확정 + try: + _validate_and_set_roi_bounds(camera.image_height_pixels, camera.image_width_pixels) + try: + h_eff = int(roi_y1_valid - roi_y0_valid) + if h_eff >= 480: + print("GUIDE: 큰 ROI(>=~500px)에서는 25 Hz 트리거(40 ms 주기)로 운용하세요 — 프레임 드롭 방지.") + except Exception: + pass + except Exception as e: + print(f"ROI 경계 확정 실패: {e}") + + print(f"Camera image size: {camera.image_width_pixels} x {camera.image_height_pixels}") + + # 하드웨어 트리거 수신을 위해 충분한 내부 버퍼 확보 (드롭 방지) + camera.arm(200) + print("카메라 ARM 완료 (하드웨어 트리거 대기 상태)") + camera_ready = True # 카메라 준비 신호 즉시 전달 + + last_report = time.time() + last_frames_local = 0 + wait_strikes = 0 + + try: + while True: + if abort_event.is_set(): + break + with capture_lock: + if frames_captured >= n_frames: + break + frame = camera.get_pending_frame_or_null() + if frame is not None: + image_buffer_copy = 
np.copy(frame.image_buffer) + img = image_buffer_copy.reshape( + camera.image_height_pixels, camera.image_width_pixels + ) + # 검증된 ROI 경계 사용 (항상 비어있지 않도록 보장) + y0 = roi_y0_valid if roi_y0_valid is not None else 0 + y1 = roi_y1_valid if roi_y1_valid is not None else img.shape[0] + x0 = roi_x0_valid if roi_x0_valid is not None else 0 + x1 = roi_x1_valid if roi_x1_valid is not None else img.shape[1] + roi = img[y0:y1, x0:x1] + if roi.size == 0: + # 최후 보루: 전체 프레임 사용 및 경계 재설정 + print("ERROR: ROI가 비어있음 → 전체 프레임으로 대체 후 AUTO 재계산") + roi = img + _validate_and_set_roi_bounds(img.shape[0], img.shape[1]) + # 타임스탬프를 소비자에 전달하기 위해 frame.frame_count와 timestamp를 함께 전달 + ts_rel_ns = getattr(frame, "time_stamp_relative_ns_or_null", None) + frame_queue.put(((frame.frame_count, ts_rel_ns), roi)) + with capture_lock: + frames_captured += 1 + last_frames_local = frames_captured + last_report = time.time() + # 디버깅: 매 1000프레임마다 진행상황 출력 + if frames_captured % 1000 == 0: + print(f"DEBUG: 현재까지 {frames_captured} 프레임 수집됨") + wait_strikes = 0 + # 외부 트리거 대기 상태 감시: 일정 시간 프레임이 없으면 안내 로그 + if time.time() - last_report > 2.0 and frames_captured == last_frames_local: + print("WAIT: 아직 수신 프레임 없음 (외부 트리거 대기 중). 트리거 케이블/극성/레벨을 확인하세요.") + wait_strikes += 1 + if wait_strikes >= 8: + print("ABORT: 프레임 무수신 상태가 지속되어 측정을 중단합니다.") + abort_event.set() + break + last_report = time.time() + # 프레임 번호 및 현재까지 수집된 프레임 정보 출력 + # 삭제됨: print(f"프레임 #{frame.frame_count} 저장 (총 {frames_captured}/{n_frames})") + except KeyboardInterrupt: + print("카메라 Producer 종료 (KeyboardInterrupt)") + finally: + camera.disarm() + # 소비자 종료 신호 + try: + frame_queue.put_nowait((None, None)) + except Exception: + pass + +# ----------------------------------------- +# 카메라 Consumer 함수 (ROI 영역 처리: 총 intensity 합 계산 및 디버깅 로그 추가) +# ----------------------------------------- +def camera_consumer(): + global measurement_complete, intensity_dict + # 안정 진입 판정 파라미터 (25 Hz 기준) + PREROLL_SEC = 1.0 + STABLE_N = 25 + STABLE_T = 0.040 # 40 ms (25 Hz) + STABLE_TOL = 0.0040 # ±4.0 ms 허용 (wall clock fallback 대비) + stable_mode = False + stable_count = 0 + last_ts = None + first_data_ts = None + + # 주파수별 intensity 누적 (평균/라이브 플롯용) + intensity_dict = defaultdict(list) + processed_frames = 0 + + h5 = None + d_roi = d_frame = d_freq = None + h5_index = 0 + h5_ready = False # 모든 dataset 정상 생성 여부 + h5_disabled = False # 초기화 실패 시 추가 시도/에러 스팸 방지 + + # --- ON/OFF 페어 기반 ODMR 대비 계산을 위한 버퍼 --- + pair_on_buf = {} + pair_off_buf = {} + contrast_dict = defaultdict(list) # key: freq(Hz), value: [contrast_pct...] 
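+    # Contrast bookkeeping: further below, once both the MW-ON and MW-OFF ROI sums
+    # for a given frequency are buffered, the per-pair contrast
+    #     C[%] = (I_off - I_on) / I_off * 100
+    # is appended to contrast_dict[freq] and the pair buffers are cleared.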
+ # HDF5 확장: mw_on(0/1), pair_id(int64) + d_mw_on = None + d_pair = None + + if PRINT_PER_FRAME: + print("Consumer 시작: 프레임-주파수 매핑 및 워밍업 스킵 동작 준비") + + # 한 주파수당 2프레임(ON/OFF) 사용 → 한 바퀴=2*mw_steps + warmup_skip = 2 * mw_steps + target_frames = n_frames - warmup_skip + + # HDF5 지연 초기화 (첫 유효 ROI 크기 파악 후 생성) + h5 = None + d_roi = d_frame = d_freq = None + h5_index = 0 + + # 카메라 프레임카운트에 의존하지 않도록 로컬 카운터 사용 + seen = 0 + + while processed_frames < target_frames: + try: + meta, roi = frame_queue.get(timeout=0.5) + # 종료 신호 처리 + if meta is None: + break + frame_num, ts_rel_ns = meta + + # 프레임 타임스탬프 확보 (ns 단위가 제공되면 우선 사용) + if ts_rel_ns is not None: + ts_now = ts_rel_ns * 1e-9 # ns → s + else: + ts_now = time.time() + # --- Diagnostics (A): timestamp source check, print once --- + try: + if (processed_frames == 0): + if ts_rel_ns is None: + print("INFO: camera timestamp is None → using wall clock for Δt (stability tolerance relaxed to ±1.5 ms).") + else: + print("INFO: camera timestamp detected (ns) → using hardware Δt.") + except Exception: + pass + if first_data_ts is None: + first_data_ts = ts_now + + # PREROLL: 카메라 ARM/CH1 시작 후 초기 구간 무시 + if (ts_now - first_data_ts) < PREROLL_SEC and not stable_mode: + if PRINT_PER_FRAME and (processed_frames % 50 == 0): + print(f"PREROLL 진행 중: {(ts_now - first_data_ts):.2f}s") + continue + + # 안정성 판정: 직전 프레임과의 간격이 목표 주기(40 ms)±tol인지 검사 + if last_ts is None: + last_ts = ts_now + continue + dt = ts_now - last_ts + # --- Diagnostics (A): print first few Δt samples --- + if 'diag_dt_prints' not in locals(): + diag_dt_prints = 0 + if diag_dt_prints < 20: + try: + src = "ns" if (ts_rel_ns is not None) else "wall" + print(f"Δt sample[{diag_dt_prints+1}]: {dt*1e3:.3f} ms (src={src})") + diag_dt_prints += 1 + except Exception: + pass + last_ts = ts_now + if abs(dt - STABLE_T) <= STABLE_TOL: + stable_count += 1 + else: + stable_count = 0 + + if not stable_mode: + if stable_count >= STABLE_N: + stable_mode = True + print(f"STABLE 진입: Δt≈{STABLE_T*1000:.1f} ms 범위 내 연속 {STABLE_N} 프레임 확인 → 본측정 시작") + # 안정 진입 시점에 SynthHD(CH2) 시작 신호 전송 + try: + synth_start_event.set() + except Exception: + pass + # 카운터/버퍼 초기화: 안정 이전 데이터는 버리고 새로 시작 + intensity_dict = defaultdict(list) + processed_frames = 0 + seen = 0 + # HDF5도 안정 이후부터 생성하도록 초기화 리셋 + h5 = None + d_roi = d_frame = d_freq = None + h5_index = 0 + h5_ready = False + h5_disabled = False + else: + if PRINT_PER_FRAME and (processed_frames % 50 == 0): + print(f"STABILIZING... ({stable_count}/{STABLE_N})") + continue + + seen += 1 # stable_mode 진입 후에만 카운트 + + # MW 주파수/ONOFF 계산 (로컬 카운터 기반) + # SynthHD 리스트를 [f0_on, f0_off, f1_on, f1_off, ...] 
순으로 구성했다고 가정 + step_index = (seen - warmup_skip - 1) % (2 * mw_steps) # 0..(2*mw_steps-1) + pair_idx = step_index // 2 # 0..(mw_steps-1) + mw_on = 1 if (step_index % 2 == 0) else 0 # 짝수=ON, 홀수=OFF + freq = mw_start + pair_idx * mw_step # Hz + + if PRINT_PER_FRAME: + onoff = 'ON ' if mw_on else 'OFF' + print(f"#{int(frame_num)} [{onoff}] freq={freq/1e9:.3f} GHz (pair {pair_idx+1}/{mw_steps})") + + # --- Warmup 1 sweep: 로그만 남기고 저장/누적 스킵 --- + if seen <= warmup_skip: + if PRINT_PER_FRAME and ((seen % 5) == 0 or seen == warmup_skip): + print(f"WARMUP skip {seen}/{warmup_skip}") + continue + + # 총 intensity 합 (오버플로 방지) + s = roi.astype(np.uint32).sum() + # HDF5 초기화 (안정 이후 첫 저장 시 1회) + if SAVE_HDF5 and HAS_H5PY and (not h5_disabled) and (not h5_ready): + try: + if h5 is None: + h5_path = os.path.join(run_dir, "data.h5") + h5 = h5py.File(h5_path, "w") + h, w = roi.shape + d_roi = h5.create_dataset("roi", shape=(0,h,w), maxshape=(None,h,w), + dtype=np.uint16, chunks=(1,h,w), compression="gzip") + d_frame = h5.create_dataset("frame_num", shape=(0,), maxshape=(None,), + dtype=np.int64, chunks=True, compression="gzip") + d_freq = h5.create_dataset("freq_hz", shape=(0,), maxshape=(None,), + dtype=np.float64, chunks=True, compression="gzip") + d_mw_on = h5.create_dataset("mw_on", shape=(0,), maxshape=(None,), + dtype=np.uint8, chunks=True, compression="gzip") + d_pair = h5.create_dataset("pair_id", shape=(0,), maxshape=(None,), + dtype=np.int64, chunks=True, compression="gzip") + h5_index = 0 + h5_ready = True + print(f"HDF5 준비 완료: {os.path.basename(h5.filename)} (roi={h}x{w})") + except Exception as e: + print(f"HDF5 초기화 실패: {e} — HDF5 저장 비활성화") + try: + if h5 is not None: + h5.close() + except Exception: + pass + h5 = None + d_roi = d_frame = d_freq = None + d_mw_on = d_pair = None + h5_disabled = True + + # ROI 저장 (HDF5 우선, 실패 시 옵션에 따라 .txt) + if SAVE_HDF5 and HAS_H5PY and h5_ready and (not h5_disabled): + try: + d_roi.resize((h5_index + 1, d_roi.shape[1], d_roi.shape[2])) + d_roi[h5_index, :, :] = roi.astype(np.uint16) + d_frame.resize((h5_index + 1,)); d_frame[h5_index] = int(frame_num) + d_freq.resize((h5_index + 1,)); d_freq[h5_index] = float(freq) + d_mw_on.resize((h5_index + 1,)); d_mw_on[h5_index] = np.uint8(mw_on) + d_pair.resize((h5_index + 1,)); d_pair[h5_index] = np.int64(pair_idx) + h5_index += 1 + except Exception as e: + print(f"HDF5 저장 실패 (frame {frame_num}): {e} — HDF5 저장 비활성화") + try: + if h5 is not None: + h5.close() + except Exception: + pass + h5 = None + d_roi = d_frame = d_freq = None + d_mw_on = d_pair = None + h5_ready = False + h5_disabled = True + elif SAVE_TXT: + # 텍스트 폴백: 런 폴더 내 단일 파일에 append + try: + txt_path = os.path.join(run_dir, "intensity.txt") + new_file = not os.path.exists(txt_path) + with open(txt_path, "a", encoding="utf-8") as f: + if new_file: + f.write("frame_num\tfreq_hz\tintensity_sum\n") + f.write(f"{int(frame_num)}\t{float(freq)}\t{int(s)}\n") + except Exception as e: + print(f"TXT 저장 실패 (frame {frame_num}): {e}") + + # 누적(평균 계산용) + intensity_dict[freq].append(int(s)) + processed_frames += 1 + + # --- ON/OFF 페어 누적 및 대비 계산 --- + if mw_on: + pair_on_buf[freq] = int(s) + else: + pair_off_buf[freq] = int(s) + + # 두 버퍼가 모두 채워지면 대비 계산 및 누적 + if freq in pair_on_buf and freq in pair_off_buf: + Ion = pair_on_buf.pop(freq) + Ioff = pair_off_buf.pop(freq) + if Ioff <= 0: + C_pct = 0.0 + else: + C_pct = (Ioff - Ion) / Ioff * 100.0 + contrast_dict[freq].append(C_pct) + # 전역 요약 카운터 업데이트 (스레드-세이프: GIL로 단일 증분은 안전) + try: + contrast_counts[freq] += 1 + except Exception: 
+ pass + + # 라이브 플롯 업데이트: (A) 기본은 한 사이클마다, (B) 보조로 최소 8개 주파수 모이면 즉시 + should_push = LIVE_ODMR and ( ((seen - warmup_skip) % mw_steps == 0) or (len(intensity_dict) >= 8 and ((seen - warmup_skip) % 3 == 0)) ) + if should_push: + try: + freqs_sorted = sorted(intensity_dict.keys()) + # 방어: 유효한 주파수/데이터만 사용 + freqs_sorted = [f for f in freqs_sorted if len(intensity_dict[f]) > 0] + if len(freqs_sorted) >= 2: + # (A) PL(norm.)는 intensity 평균으로 유지 + y = np.array([np.mean(intensity_dict[f]) for f in freqs_sorted], dtype=float) + x = np.array([f / 1e9 for f in freqs_sorted], dtype=float) + y_max = float(y.max()) if y.size and y.max() > 0 else 1.0 + pl_norm = y / y_max + + # (B) ODMR contrast는 ON/OFF 페어 기반 중앙값 사용(견고) + have_contrast = [f for f in freqs_sorted if len(contrast_dict[f]) > 0] + if len(have_contrast) >= 2: + contrast_med = np.array([np.median(contrast_dict[f]) if len(contrast_dict[f])>0 else np.nan for f in freqs_sorted], dtype=float) + else: + contrast_med = np.full_like(pl_norm, np.nan, dtype=float) + + # NaN 방어: 대비가 없는 지점은 직전값 보간 또는 0으로 대체 + if np.all(np.isnan(contrast_med)): + contrast_med = np.zeros_like(pl_norm) + else: + # 간단 보간(선형) — 가장자리 NaN은 최근값 유지 + try: + idx = np.arange(len(contrast_med)) + m = np.isfinite(contrast_med) + if m.any(): + contrast_med[~m] = np.interp(idx[~m], idx[m], contrast_med[m]) + except Exception: + contrast_med = np.nan_to_num(contrast_med, nan=0.0) + + contrast_pct = contrast_med + + # 최신 데이터만 유지 (큐가 가득 차면 가장 오래된 항목 버림) + while not plot_queue.empty(): + try: + plot_queue.get_nowait() + except Exception: + break + plot_queue.put_nowait((x, pl_norm, contrast_pct)) + if PRINT_PER_FRAME and ((seen - warmup_skip) % 10 == 0): + print(f"PLOT push: n_freqs={len(freqs_sorted)}, x≈[{x[0]:.3f}..{x[-1]:.3f}] GHz") + except Exception as e: + if PRINT_PER_FRAME: + print(f"PLOT prepare err: {e}") + + except queue.Empty: + continue + + measurement_complete = True + + # HDF5 정리 및 요약 로그 작성 + try: + h5_path = None + if SAVE_HDF5 and HAS_H5PY and h5 is not None: + try: + h5.flush() + except Exception: + pass + h5_path = h5.filename + h5.close() + # 요약 정보 준비 + try: + freqs_sorted = sorted(intensity_dict.keys()) + summary_lines = [] + summary_lines.append(f"ended_at: {datetime.datetime.now().isoformat()}\n") + summary_lines.append(f"result: {'ABORT' if abort_event.is_set() else 'OK'}\n") + summary_lines.append(f"frames_captured: {frames_captured}\n") + summary_lines.append(f"frames_processed: {sum(len(v) for v in intensity_dict.values())}\n") + summary_lines.append(f"unique_freqs: {len(freqs_sorted)}\n") + if h5_path: + summary_lines.append(f"hdf5: {os.path.basename(h5_path)}\n") + except Exception: + summary_lines = [] + # ABORT 시 파일명 변경 + if abort_event.is_set(): + # rename data.h5 → data_abort.h5 (동일 폴더) + try: + if h5_path and os.path.exists(h5_path): + abort_path = os.path.join(run_dir, "data_abort.h5") + try: + if os.path.exists(abort_path): + os.remove(abort_path) + except Exception: + pass + os.replace(h5_path, abort_path) + summary_lines.append(f"hdf5_renamed: data_abort.h5\n") + except Exception as e: + summary_lines.append(f"hdf5_rename_error: {e}\n") + # abort summary + try: + with open(os.path.join(run_dir, 'abort_summary.txt'), 'w', encoding='utf-8') as f: + f.writelines(summary_lines) + except Exception: + pass + else: + # normal summary + try: + with open(os.path.join(run_dir, 'run_summary.txt'), 'w', encoding='utf-8') as f: + f.writelines(summary_lines) + except Exception: + pass + except Exception: + pass + +def plotter_mainloop(): + if not LIVE_ODMR: + return 
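+    # Main-thread plot loop: pulls (x_GHz, PL_norm, contrast_%) tuples from
+    # plot_queue, converts contrast to 1 - dPL/PL for the lower panel, and keeps
+    # redrawing until measurement_complete is set and no update arrives for ~0.5 s.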
+ fig = None + ax_pl = ax_con = line_pl = line_con = None + import matplotlib + try: + # GUI 백엔드 사용 (Windows에서 TkAgg 기본). 모든 plt 호출은 메인 스레드에서만. + matplotlib.rcParams["toolbar"] = "toolmanager" + except Exception: + pass + try: + plt.ion() + except Exception: + pass + last_update = time.time() + while True: + try: + x, pl_norm, contrast_pct = plot_queue.get(timeout=0.2) + try: + contrast_pl = 1.0 - (np.asarray(contrast_pct, dtype=float) / 100.0) + except Exception: + # Fallback if a scalar sneaks in + contrast_pl = 1.0 - (float(contrast_pct) / 100.0) + if fig is None: + fig, (ax_pl, ax_con) = plt.subplots(2, 1, sharex=True) + line_pl, = ax_pl.plot(x, pl_norm, marker='o') + line_con, = ax_con.plot(x, contrast_pl, marker='o') + ax_pl.set_ylabel("PL (norm.)") + ax_con.set_ylabel("1 − ΔPL/PL (norm.)") + ax_con.set_xlabel("Frequency (GHz)") + ax_pl.set_title("Live CW-ODMR") + # Force top at 1.0; bottom a bit below min for visibility + try: + ymin_pl = float(np.min(pl_norm)) if hasattr(pl_norm, '__len__') else float(pl_norm) + except Exception: + ymin_pl = 0.0 + try: + ymin_con = float(np.min(contrast_pl)) if hasattr(contrast_pl, '__len__') else float(contrast_pl) + except Exception: + ymin_con = 0.0 + ax_pl.set_ylim(max(0.0, ymin_pl - 0.001), 1.0) + ax_con.set_ylim(max(0.0, ymin_con - 0.001), 1.0) + else: + line_pl.set_xdata(x); line_pl.set_ydata(pl_norm) + line_con.set_xdata(x); line_con.set_ydata(contrast_pl) + # Force top at 1.0; bottom a bit below min for visibility + try: + ymin_pl = float(np.min(pl_norm)) if hasattr(pl_norm, '__len__') else float(pl_norm) + except Exception: + ymin_pl = 0.0 + try: + ymin_con = float(np.min(contrast_pl)) if hasattr(contrast_pl, '__len__') else float(contrast_pl) + except Exception: + ymin_con = 0.0 + ax_pl.set_ylim(max(0.0, ymin_pl - 0.001), 1.0) + ax_con.set_ylim(max(0.0, ymin_con - 0.001), 1.0) + # 항상 리림/오토스케일 후 강제 페인트 + try: + ax_pl.relim(); ax_pl.autoscale_view() + ax_con.relim(); ax_con.autoscale_view() + fig.canvas.draw_idle() + except Exception: + pass + plt.pause(0.01) + last_update = time.time() + except queue.Empty: + # 종료 조건: 측정 완료 이후 일정 시간동안 업데이트 없으면 종료 + if measurement_complete and (time.time() - last_update) > 0.5: + break + continue + try: + plt.ioff() + plt.show(block=False) + plt.pause(0.001) + except Exception: + pass + try: + plt.close('all') + except Exception: + pass + +# ----------------------------------------- +# 쓰레드 시작 및 데이터 수집 완료 +# ----------------------------------------- +sdg_thread = threading.Thread(target=sdg_control, daemon=True) +producer_thread = threading.Thread(target=camera_producer, daemon=True) +consumer_thread = threading.Thread(target=camera_consumer, daemon=True) + +sdg_thread.start() +producer_thread.start() +consumer_thread.start() + +# 라이브 플롯은 메인 스레드에서만 처리 (Tkinter 예외 방지) +plotter_mainloop() + +producer_thread.join() +consumer_thread.join() +sdg_thread.join() + +# 디버깅: intensity_dict에 저장된 데이터 개수 확인 +for freq in sorted(intensity_dict.keys()): + print(f"DEBUG: 주파수 {freq/1e9:.3f} GHz에 측정된 데이터 개수: {len(intensity_dict[freq])}") + +# 각 MW 주파수별 평균 intensity 계산 (각 주파수 당 1000회 측정이 목표) +frequencies = sorted(intensity_dict.keys()) +avg_intensities = [np.mean(intensity_dict[freq]) for freq in frequencies] + +# Contrast 개수 요약 (컨슈머가 누적해둔 전역 카운터 사용) +try: + for freq in frequencies: + cN = int(contrast_counts.get(freq, 0)) + print(f"DEBUG: 주파수 {freq/1e9:.3f} GHz 대비 샘플 수: {cN}") +except Exception: + pass diff --git a/Python/EXPERIMENT.md b/Python/EXPERIMENT.md new file mode 100644 index 0000000..68f2c9c --- /dev/null +++ 
b/Python/EXPERIMENT.md
@@ -0,0 +1,36 @@
+# ODMR
+Python code for the ODMR experiment.
+
+## 1. CW ODMR
+
+### 1.1 Apparatus
+- CMOS camera : CS165CU1/M - Zelux 1.6MP Color CMOS camera
+- Dual-channel function/arbitrary waveform generator : Siglent SDG2082x
+- Single-color cold visible mounted LED : Thorlabs M565L3
+- Objective : Olympus UPlanFL N (x5 ~ x60)
+- Long-pass dichroic mirror : Thorlabs DMLP505
+- Dual-channel microwave RF signal generator : Windfreak SynthHD
+- Several convex lenses with various focal lengths.
+
+### 1.2 Method: Continuous-Wave ODMR
+
+We want to obtain ODMR data of spin defects in hBN using a wide-field imaging setup. The experimental scheme follows the steps below:
+
+1. Illuminate the hBN continuously with the LED.
+2. Apply the microwave source to the hBN. Since we observe the ODMR contrast signal at each MW frequency, we sweep the MW frequency from 3 GHz to 4 GHz in 50 MHz steps.
+
+#### Triggering & Timing
+To synchronize all of the equipment, we adopt the following triggering sequence.
+
+1. CH.1 of the SDG2082x generates a pulse train with 200 μs width and 50 Hz repetition rate. CH.2 of the SDG2082x generates an inverted pulse train (3.0 V high baseline with one low pulse per period) with the same width and frequency as CH.1 (200 μs and 50 Hz).
+2. Driven by this trigger, the SynthHD is set to single-sweep-step mode and advances one frequency step per trigger, from 3 GHz to 4 GHz in 50 MHz increments. The camera exposure time is set to 1.5 ms and the trigger arrives every 20 ms, leaving approximately 18.5 ms for readout.
+3. Therefore each frame obtained from the camera corresponds to one MW frequency. The data is saved as an HDF5 file, and the code matches frequency to frame number by index.
+4. Then build a 2D array whose x axis is the horizontal pixel index within the camera ROI and whose y axis is the vertical pixel index within the camera ROI.
+
+### 1.3 Code Control
+To realize the above method, the synchronization of all experimental equipment is controlled by Python code.
+
+- SDG2082x : Use the pyvisa package to control signal on/off, pulse width, pulse frequency, amplitude, and offset.
+- CS165CU1/M : Use the Thorlabs Python SDK (thorlabs_tsi_sdk). The official document is in '/Users/woojins/Documents/GitHub/ODMR/Thorlabs_Camera_Python_API_Reference.pdf' and the official examples are in [Thorlabs Github](https://github.com/Thorlabs/Camera_Examples.git). Whenever revising the code, check both the official document and the GitHub examples carefully.
+- Other parts also use Python and conventional packages such as matplotlib, numpy, scipy, and so on.
+- **Data handling**: Use `h5py` to stream ROIs and metadata into `data.h5` (chunked, compressed). This is the default (`SAVE_HDF5=True`, `SAVE_TXT=False`); a read-back sketch follows below.
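+
+For offline analysis, the streamed `data.h5` can be reduced back to an ODMR spectrum. The sketch below is illustrative only: it assumes the dataset names written by `CW_ODMR5.py` (`roi`, `freq_hz`, `mw_on`) and uses a simple per-frequency mean instead of the per-pair median used in the live plot.
+
+```python
+import numpy as np
+import h5py
+
+def load_odmr_spectrum(h5_path="data.h5"):
+    """Rebuild PL and contrast vs. MW frequency from the streamed datasets."""
+    with h5py.File(h5_path, "r") as f:
+        freq_hz = f["freq_hz"][:]
+        mw_on = f["mw_on"][:].astype(bool)
+        n = f["roi"].shape[0]
+        intensity = np.empty(n, dtype=np.float64)
+        for i in range(0, n, 500):  # read ROI frames in slabs to keep memory bounded
+            block = f["roi"][i:i + 500].astype(np.uint64)
+            intensity[i:i + 500] = block.sum(axis=(1, 2))
+
+    freqs = np.unique(freq_hz)
+    pl_mean = np.array([intensity[freq_hz == fr].mean() for fr in freqs])
+    contrast_pct = np.empty(len(freqs))
+    for k, fr in enumerate(freqs):
+        i_on = intensity[(freq_hz == fr) & mw_on].mean()
+        i_off = intensity[(freq_hz == fr) & ~mw_on].mean()
+        # Same convention as the acquisition code: C[%] = (I_off - I_on) / I_off * 100
+        contrast_pct[k] = (i_off - i_on) / i_off * 100.0
+    return freqs / 1e9, pl_mean / pl_mean.max(), contrast_pct
+
+# e.g. freqs_ghz, pl_norm, contrast = load_odmr_spectrum("20250912_030534/data.h5")
+```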
diff --git a/Python/Requirements.txt b/Python/Requirements.txt new file mode 100644 index 0000000..189b9b9 --- /dev/null +++ b/Python/Requirements.txt @@ -0,0 +1,3 @@ +pillow >= 5.4.1 +tifffile >= 2019.3.8 +numpy diff --git a/Python/Thorlabs_Camera_Python_API_Reference.pdf b/Python/Thorlabs_Camera_Python_API_Reference.pdf new file mode 100644 index 0000000..3f813f0 Binary files /dev/null and b/Python/Thorlabs_Camera_Python_API_Reference.pdf differ diff --git a/Python/__pycache__/windows_setup.cpython-313.pyc b/Python/__pycache__/windows_setup.cpython-313.pyc new file mode 100644 index 0000000..14ab2ef Binary files /dev/null and b/Python/__pycache__/windows_setup.cpython-313.pyc differ diff --git a/Python/aerodiode/AeroDIODE_Python_Library/CHANGELOG.md b/Python/aerodiode/AeroDIODE_Python_Library/CHANGELOG.md new file mode 100644 index 0000000..7db61e7 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/CHANGELOG.md @@ -0,0 +1,45 @@ +# Changelog + + +## 1.1.0 (November 14th, 2023) +- Add address to boards initialisation and allow mutiple device on a single COM port +- Fix modification and deactivation of timeout +- Add display of communication bytes for debug (comment) + + +## 1.0.8 (August 7th, 2023) +- Add setpoints for PDM mode and measure MOS temperature (Shaper v1.1.0) + + +## 1.0.7 (July 3rd, 2023) +- Add complilance voltage setpoint (PDMV5 v1.0.2) + + +## 1.0.6 (April 25th, 2023) +- Enhancement send_csv(TOMBAK v1.0.4) + + +## 1.0.5 (February 10th, 2023) +- Add setpoin Gate source and fine delay activation (TOMBAK v1.0.3) + + +## 1.0.4 (January 04th, 2023) +- Add timeout for the apply_all command to avoid timeout error after a send_shape for Shaper library (Shaper v1.0.3) + + +## 1.0.3 (October 25th, 2022) +- Improvement of the csv-reader to send the correct number of points with the send_shape (Shaper v1.0.3) + + +## 1.0.2 (October 17th, 2022) +- Add clock ext activation and Division setpoint for U64 frame (Tombak v1.0.2) + + +## 1.0.1 (October 14th, 2022) +- Version of all class +- Add SuperShape activation (Shaper v1.0.1) +- Cleanning alls files + + +## 1.0.0 (October 7th, 2022) +- First official release on gitlab \ No newline at end of file diff --git a/Python/aerodiode/AeroDIODE_Python_Library/README.md b/Python/aerodiode/AeroDIODE_Python_Library/README.md new file mode 100644 index 0000000..c2d600c --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/README.md @@ -0,0 +1,3 @@ +# LIB_COMMUN_PYTHON + +py setup.py install \ No newline at end of file diff --git a/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/PKG-INFO b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/PKG-INFO new file mode 100644 index 0000000..70bbe45 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/PKG-INFO @@ -0,0 +1,23 @@ +Metadata-Version: 2.1 +Name: aerodiode +Version: 1.1.0 +Summary: Aerodiode device control +Home-page: UNKNOWN +Author: AeroDIODE +Author-email: +License: UNKNOWN +Keywords: python,optoelectronic,laser,aerodiode +Platform: UNKNOWN +Classifier: Development Status :: 1 - Planning +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Description-Content-Type: text/markdown + + +# LIB_COMMUN_PYTHON + +py setup.py install + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/SOURCES.txt 
b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/SOURCES.txt new file mode 100644 index 0000000..d1bf4ed --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/SOURCES.txt @@ -0,0 +1,7 @@ +README.md +setup.py +aerodiode.egg-info/PKG-INFO +aerodiode.egg-info/SOURCES.txt +aerodiode.egg-info/dependency_links.txt +aerodiode.egg-info/requires.txt +aerodiode.egg-info/top_level.txt \ No newline at end of file diff --git a/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/dependency_links.txt b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/requires.txt b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/requires.txt new file mode 100644 index 0000000..8b93539 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/requires.txt @@ -0,0 +1,2 @@ +pyserial +csv-reader diff --git a/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/top_level.txt b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/top_level.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/aerodiode.egg-info/top_level.txt @@ -0,0 +1 @@ + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/__init__.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/__init__.py new file mode 100644 index 0000000..88bb5d7 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/__init__.py @@ -0,0 +1,24 @@ +''' + __init__.py + Enables to create the required depedencies of the library. + To add a function or class tap the following line: + from aerodiode.name_file import name_function/name_class +''' + +#DO NOT MODIFY THIS PART +from aerodiode.aerodiode import Aerodiode +from aerodiode.aerodiode import Status +from aerodiode.aerodiode import StatusError +from aerodiode.aerodiode import ChecksumError +from aerodiode.aerodiode import ProtocolError +from aerodiode.aerodiode import ConnectionFailure +from aerodiode.aerodiode import ProtocolVersionNotSupported +#TO MODIFY according the client needs +from aerodiode.shaper import Shaper +from aerodiode.mmd import Mmd +from aerodiode.pdmv3 import Pdmv3 +from aerodiode.pdmv3_cw import Pdmv3_cw +from aerodiode.tombak import Tombak +from aerodiode.pdmv5 import Pdmv5 +from aerodiode.pdmv5_cw import Pdmv5_cw +from aerodiode.central import Central diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/aerodiode.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/aerodiode.py new file mode 100644 index 0000000..ef2ccb7 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/aerodiode.py @@ -0,0 +1,419 @@ +'''' +Gather the Aerodiode and Status class: + -> Aerodiode enables Python to drive any Aerodiode's device + -> Status enables Python to manage the warning errors +''' + + +#Importations +import serial #library for the UART tranmission +from enum import Enum #requires for the error management +import struct #requires for the error management +import time + +#Déclaration de la classe +class Aerodiode(): + """ + Base Aerodiode's device communication. + An instance of :class:'Aerodiode' uses a serial port and is used by multiple :class:'CCS','SHAPER','CCM' etc... . 
+ Aerodiode enables: + - the connection by the UART, open and close the port + - the send of query and the reception of response + - + """ + VERSION = "1.1.0" + #Constant + CONST_MAX_LENGHT_QUERY = 0xFA + DEFAULT_TIMEOUT = 0.5 + + #Commands + COMMAND_GET_ADD = 0x01 + COMMAND_READ_PROTOCOLE = 0x02 + COMMAND_READERR = 0x03 + COMMAND_WRITE = 0x10 + COMMAND_READ_INSTRUCTIONS = 0x11 + COMMAND_APPLY_ALL = 0x12 + COMMAND_SAVE_ALL = 0x13 + COMMAND_READ_MEASURE = 0x14 + + #xnn value + OFF = 0x00 + ON = 0x01 + + INTERNAL = 0x00 + EXTERNAL = 0x01 + + DISABLE = 0x00 + ENABLE = 0x01 + + MANUAL = 0x00 + AUTOMATIC = 0x01 + + INT_INT = 0x00 + EXT_INT = 0x01 + + NONE = 0x00 + SOFT = 0x01 + + ANALOG = 0x00 + NUMERIC = 0x01 + + EXTERNAL_TTL_LVTTL_PULSE = 0x00 + EXTERNAL_LVDS_PULSE = 0x01 + INTERNAL_PULSE = 0x02 + + #Opened port list + PORTS = [] + + + #Methods + def __init__(self, port, address = -1): + """ + Open serial device. + :param port: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + self.checkPorts(port) + if(address == -1): + self._add = self.get_addr() #get the adress + else: + self._add = address #get the adress + + def __del__(self): + """ + Destructor automatically called at each end of script. + Close the serial port. + """ + if hasattr(self,'ser'): + self.ser.close() + + def checkPorts(self, port): + """ + Checks if the port is already used + """ + alreadyOpened = False + for i in range (len(Aerodiode.PORTS)) : + if(Aerodiode.PORTS[i].port == port) : + if not Aerodiode.PORTS[i].is_open: + self.ser.open() + self.ser = Aerodiode.PORTS[i] + alreadyOpened = True + + if not alreadyOpened : + self.ser = serial.Serial(port, baudrate=125000, timeout=Aerodiode.DEFAULT_TIMEOUT) #open serial port + Aerodiode.PORTS.append(self.ser) + self.ser.name + + def open(self): + """ + Open the serial port. + :param port: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + if not self.ser.is_open: + self.ser.open() + + def close(self): + """ + Close the serial port. + """ + self.ser.close() + + def checksum(self, mess): + """ + Calculate the checksum of some data. + :param data: Input data bytes. + :return: Checksum byte value. + """ + chk = 0 + for byte in mess: + chk ^= byte #xor + return (chk-1)%256 #result < 255 chk is on 8 bit + + def receive_response(self): + """ + Receive a response. Verify the status and checksum. + :return: Received data, without header and checksum. + """ + # Get length byte. + data = self.ser.read(1) + # Length byte must be at least 3 for responses (length byte, status + # byte and checksum byte). + if len(data) == 0: + raise Timeout() + if data[0] < 3: + raise ProtocolError() + # Fetch all the bytes of the command + data += self.ser.read(data[0]-1) + + #print("answer = ",data.hex("\\")) #debug + + # Verify the checksum + if self.checksum(data[:-1]) != data[-1]: + raise ChecksumError() + # Verify the status + if data[1] != 0x00: + raise StatusError(data[1]) + return data[1:-1] + + def send_query(self, address, command, data = [], timeout=0.0): + """ + Transmit a command to the laser source. This method automatically add + the length and checksum bytes. + :param address: Device address. + :param command: An instance of Command define. + :param data: Data bytes. + :param address: Device address override. 
+ """ + data = bytearray(data) + length = 4 + len(data) + if length > self.CONST_MAX_LENGHT_QUERY: + raise ValueError('data too long.') + frame = bytearray([length, address, command]) + data + frame.append(self.checksum(frame)) + + #print("request = ", frame.hex("\\")) #debug + + self.ser.write(frame) + + if timeout == -1: + self.ser.timeout = None + elif timeout != 0.0 : + self.ser.timeout = timeout + + res = self.receive_response() + self.ser.timeout = Aerodiode.DEFAULT_TIMEOUT + return res + + def write(self, consign_id, xnnvalue = [0], timeout = 0.0): #not usefull + """ + write instruction such as laser on + """ + + consign_id1 = consign_id//0xFF + consign_id0 = consign_id - (consign_id//0xFF)*0xFF + return self.send_query(self._add, self.COMMAND_WRITE, [consign_id1, consign_id0] + xnnvalue,timeout) + + + def get_addr(self): + """ + Get the adress of the card. + :return: the adress of the card + """ + answer = self.send_query(0x00, self.COMMAND_GET_ADD) + if self.status(answer) != 0: + return self.status + return int(answer[1]) #peut'être défaut si la carte à une adress >9 à vérifier + + def save_all(self, timeout = 0.0): + """ + Save instructions + """ + return self.send_query(self._add, self.COMMAND_SAVE_ALL, [], timeout) + + def apply_all(self, timeout = 0.0): + """ + Apply all the set in the card + """ + return self.send_query(self._add, self.COMMAND_APPLY_ALL, [], timeout) #used the timeout with the shaper + + def status(self, answer): + if (answer[0] == 0): + return 0 + else: + return answer[0] + + + def read_float_instruction(self, instruction): #vérifier que ce la s'adapte à tout type de meusure + """ + Get an instruction value from a specific consign. + :param consign: + :return: measure in float type + """ + data = self.send_query(self._add, self.COMMAND_READ_INSTRUCTIONS, [0x00, instruction]) #Read + return round(struct.unpack('>f', bytes(data[1::]))[0],10) + + def set_float_instruction(self, instruction, value = 0.0): + + """ + Set a float value. 
+ :param instruction: + :param value: + """ + + if isinstance(value, float) == False and isinstance(value, int) == False: + return -2 #A améliorer + + value = float(value) + + size = len(self.send_query(self._add, self.COMMAND_READ_INSTRUCTIONS, [0x00, instruction])) - 1 + tab_value = [] + data = struct.pack('>f',value) + + for i in range(0, size): + tab_value.append(data[i]) + + return self.send_query(self._add, self.COMMAND_WRITE, [0x00, instruction]+tab_value) + + + def read_current_instruction(self, instruction): + return self.read_float_instruction(instruction) + + def set_current_instruction(self, instruction, value=0.0): + return self.set_float_instruction(instruction, value) + + def read_voltage_instruction(self, instruction): + return self.read_float_instruction(instruction) + + def set_voltage_instruction(self, instruction, value=0): + return self.set_float_instruction(instruction, value) + + def read_temperature_instruction(self, instruction): + return self.read_float_instruction(instruction) + + def set_temperature_instruction(self, instruction, value = 0.0): + return self.set_float_instruction(instruction, value) + + def read_percent_instruction(self, instruction): + return self.read_float_instruction(instruction) + + def set_percent_instruction(self, instruction, value = 0.0): + return self.set_float_instruction(instruction, value) + + def read_power_instruction(self, instruction): + return self.read_float_instruction(instruction) + + def set_power_instruction(self, instruction, value = 0.0): + return self.read_float_instruction(instruction) + + + def read_integer_instruction(self, instruction): + """ + Get an integer value from a specific consign. + :param instruction: + :return: value in integer type + """ + return int.from_bytes(self.send_query(self._add, self.COMMAND_READ_INSTRUCTIONS, [0x00, instruction]), byteorder = 'big', signed=False) + + def set_integer_instruction(self, instruction, value = 0): + """ + Set an integer value. 
+ :param instruction: + :param: value + """ + + if isinstance(value, int) == False and isinstance(value, float): + return -1 #A améliorer + + value = int(value) + + size = len(self.send_query(self._add, self.COMMAND_READ_INSTRUCTIONS, [0x00, instruction])) - 1 + tab_value = [] + data = value.to_bytes(size, 'big') + for i in range(0, size): + tab_value.append(data[i]) + return self.send_query(self._add, self.COMMAND_WRITE, [0x00, instruction]+tab_value) + + def read_status_instruction(self, instruction): + return self.read_integer_instruction(instruction) + + def set_status_instruction(self, instruction, value = 0): + return self.set_integer_instruction(instruction, value) + + def read_time_instruction(self, instruction): + return self.read_integer_instruction(instruction) + + def set_time_instruction(self, instruction, value = 0): + return self.set_integer_instruction(instruction, value) + + def read_freq_instruction(self, instruction, value = 0): + return self.read_integer_instruction(instruction) + + def set_freq_instruction(self, instruction, value = 0): + return self.set_integer_instruction(instruction, value) + + def read_step_instruction(self, instruction): + return self.read_integer_instruction(instruction) + + def set_step_instruction(self, instruction, value = 0): + return self.set_integer_instruction(instruction, value) + + def measure(self, measureid, typ = 0): + ''' + Get a measure + :param measureid: + :param typ: data type int( = 0) or float( = 1) + ''' + if typ == 0: + return int.from_bytes(self.send_query(self._add, self.COMMAND_READ_MEASURE, [0x00, measureid]), byteorder = 'big', signed=False) + else: + data = self.send_query(self._add, self.COMMAND_READ_MEASURE, [0x00, measureid]) #Read + return round(struct.unpack('>f', bytes(data[1::]))[0],10) + + + + + + +#Status----------------------------------------------------------------------------- + +class Status(Enum): + """ Response status from the Aerodiode device. """ + OK = 0x00 + TIMEOUT = 0x01 + UNKNOWN_COMMAND = 0x02 + QUERY_ERROR = 0x04 + BAD_LENGTH = 0x08 + CHECKSUM_ERROR = 0x10 + +class StatusError(Exception): + """ + Thrown when an Aerodiode device did not respond with 'OK' status to the last + command. + """ + def __init__(self, status): + """ + :param status: Status code. int. + """ + super().__init__() + self.status = status + + def __str__(self): + return str(Status(self.status)) + +class ChecksumError(Exception): + """ Thrown if a communication checksum error is detected. """ + # AL pass + def __str__(self): + return 'ChecksumError' + + +class ProtocolError(Exception): + """ Thrown if an unexpected response from the device is received. """ + # AL pass + def __str__(self): + return 'ProtocolError' + + +class ConnectionFailure(Exception): + # AL pass + def __str__(self): + return 'ConnectionFailure' + + +class ProtocolVersionNotSupported(Exception): + """ + Thrown when a PDM protocol version is not (yet) supported by the library. + """ + def __init__(self, version): + """ + :param version: Version string. 
+ """ + super().__init__() + self.version = version + + def __str__(self): + return self.version + + +class Timeout(Exception): + def __str__(self): + return 'Timeout' diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/central.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/central.py new file mode 100644 index 0000000..5769975 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/central.py @@ -0,0 +1,330 @@ +#CCM +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Central(Aerodiode): + VERSION = "1.1.0" + + INSTRUCT_LASER_ACTIVATION = 0x0A #ok + INSTRUCT_ALIGNEMENT_DIODE_ACTIVATION = 0x0F #ok + INSTRUCT_TRIGGER_FREQUENCY = 0x14 #ok + INSTRUCT_DELAY_MODE = 0x15 #ok + INSTRUCT_DELAY = 0x16 #ok + INSTRUCT_SECURITY_COEFF_MODE = 0x17 #ok + INSTRUCT_SECURITY_COEFF = 0x18 #ok + INSTRUCT_CURRENT_SLOPE_MODE = 0x19 #ok + INSTRUCT_CURRENT_SLOPE = 0x1A #ok + INSTRUCT_EM_GATE = 0x1B #ok + INSTRUCT_AUX_OFF = 0x1C #ok + INSTRUCT_LASER_POWER_MODE = 0x1D #ok + INSTRUCT_TRIGGER_MODE = 0x1E #ok + + ###????### #ok + INSTRUCT_WATCHDOG_PD_PULSE_PHOTO = 0x1F + + ##SMD1 LASER DIODE CONTROL #ok + INSTRUCT_SMD1_TEMPERATURE = 0x28 + INSTRUCT_SMD1_VALIM = 0x29 + INSTRUCT_SMD1_MODE = 0x2B + INSTRUCT_SMD1_CURRENT = 0x2C + INSTRUCT_SMD1_POWER = 0x2D + INSTRUCT_SMD1_MAX_CURRENT = 0x2E + INSTRUCT_SMD1_CURRENT_RATIO = 0x2F + + ##SMD2 LASER DIODE CONTROL #ok + INSTRUCT_SMD2_TEMPERATURE = 0x32 + INSTRUCT_SMD2_PULSED_MODE = 0x33 + INSTRUCT_SMD2_PULSE_WIDTH = 0x34 + INSTRUCT_SMD2_CURRENT = 0x35 + INSTRUCT_SMD2_OFFSET_CURRENT = 0x36 + INSTRUCT_SMD2_MAX_AVERAGE_CURRENT = 0x37 + INSTRUCT_SMD2_MAX_PEAK_CURRENT = 0x38 + INSTRUCT_SMD2_OPERATING_MODE = 0x39 + #INSTRUCT_OPERATING_MODE + + ##MMD #ok + INSTRUCT_MMD1_LASER_ACTIVATION = 0x3C + INSTRUCT_MMD2_LASER_ACTIVATION = 0x3D + INSTRUCT_MMD3_LASER_ACTIVATION = 0x3E + INSTRUCT_MMD_LASER_DIODE_CURRENT = 0x3F + + + + ##ALARMS + #SMD1_MAX_TEMPERATURE = 0x46 + #SMD1_MAX_TEMPERATURE = 0x47 + #MIN_TEMPERATURE = 0x48 + #MAX_TEMPERATURE = 0x49 + #LOW_PD_IN_LASER_POWER_THRESHOLD = 0x4A + #LOW_PD_OUT_LASER_POWER_THRESHOLD = 0x4B + #LOW_PD_CRI_LASER_POWER_THRESHOLD = 0x4C + #SMD2_AVERAGE_CURRENT_THRESHOLD = 0x4D + #HIGH_POWER_LASER_RETURN_ALARM = 0x4E + INSTRUCT_ALARM_ACTIVATION = 0x4F + INSTRUCT_INTERLOCK_IF_ALARM = 0x50 + INSTRUCT_DISABLE_1S_IF_ALARM = 0x51 + INSTRUCT_ALARM_HISTORY = 0x52 + + ##PHOTODIODE #ok + INSTRUCT_CALIBRATION_PD_IN = 0x5A + INSTRUCT_CALIBRATION_PD_INTER_CW = 0x5B + INSTRUCT_CALIBRATION_PD_OUT_CW = 0x5C + INSTRUCT_CALIBRATION_PD_CRI_CW = 0x5D + INSTRUCT_CALIBRATION_PD_BRA_CW = 0x5E + INSTRUCT_PD_APC_MODE = 0x5F + INSTRUCT_MODE_PD_OUT_CRI = 0x60 + INSTRUCT_MAX_ACC_TIME_BEFORE_PD_OUT = 0x61 + INSTRUCT_MAX_ACC_TIME_BEFORE_PD_CRI = 0x62 + INSTRUCT_PD_IN_PULSE_ALARM_TRIGGER_THRESHOLD = 0x63 + + ##PULSE PICKER CONTROL #ok + INSTRUCT_OPERATING_MODE = 0x8C + INSTRUCT_PULSE_IN_SOURCE = 0x8D + INSTRUCT_DELAY_PULSE_IN = 0x8E + INSTRUCT_DIVIDER_PULSE_IN = 0x8F + INSTRUCT_DELAY_PULSE_OUT = 0x90# + INSTRUCT_WIDTH_PULSE_OUT = 0x91# + INSTRUCT_PULSE_OUT_MODE = 0x92 + INSTRUCT_ACTIVATION_DELAY = 0x93 + INSTRUCT_ACTIVATION_PULSE_WIDTH = 0x94 + + #MEASURE ID + M_MAIN_V_MON = 0 + M_HK_V_MON = 1 + M_RED_GUIDE_VMON = 2 + M_SMD1_T = 3 + M_SMD2_T = 4 + M_CASE_T = 5 + M_SMD1_CURRENT = 6 + M_PD_OUT_POWER = 7 + M_PD_BRA_POWER = 8 + M_PD_IN_CW_POWER = 9 + M_PD_INTER_POWER = 10 + M_PD_CRI_POWER = 11 + M_PD_IN_PULSE_FREQUENCY = 12 + M_EXTERNAL_SYNC_FREQUENCY = 13 + 
M_LATCHED_INTERLOCKED_ALARMS = 15 + M_TIME_SINCE_ALARM_TRIGGERED = 16 + M_ALARM_ACTIVATED = 14 + M_PD_OUT_PULSE_ACC_TIME = 17 + M_PD_CRI_PULSE_ACC_TIME = 18 + M_FSM_CURRENT_STATE = 19 + M_ANALOGIC_SPARE_KK0 = 20 + M_ANALOGIC_SPARE_KK1 = 21 + + #VALUE + SOFTWARE = 0 + DB25 = 1 + NONE = 2 + + DIRECT_TRIG = 0 + INTENRAL = 1 + + DCC = 0 + DPC = 1 + + PD_OUT = 0 + PD_CRI = 1 + + PULSED = 0 + CW = 1 + + POS_LOGIC = 0 + NEG_LOGIC = 1 + + NO_MODE = 0 + PULSE_PICKER = 1 + HIGH = 2 + SYNC = 3 + + PD_PULSE = 0 + INT_SMD2 = 1 + EXT_SMD2 = 2 + + + + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port,address) + + #Alarm length = 11 + self.alarm_enable = dict(ALIM_VMAIN = 1, ALIM_HK = 1, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) + self.alarm_behavior1 = dict(ALIM_VMAIN = 1, ALIM_HK = 1, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) #Interlock if alarm + self.alarm_behavior2 = dict(ALIM_VMAIN = 1, ALIM_HK = 1, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) #Disable 1s if alarm + self.alarm_status = dict(ALIM_VMAIN = 1, ALIM_HK = 1, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) + self.alarm_history = dict(ALIM_VMAIN = 1, ALIM_HK = 1, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) + self.time_since_alarm_triggered = dict(ALIM_VMAIN = 0, ALIM_HK = 0, LASER_ALIGNEMENT = 0, T_SMD1 = 0, T_SMD2 = 0, MEAN_CURRENT_SMD2 = 0, + T_CASE = 0, BAD_START_SEQ = 0, CASE_OPEN = 0, EXT_SHUTDOWN = 0, PD_IN_LOW = 0) + + + def read_alarm(self, alarm_id, alarm_dict, typ = 0): + ''' + Read alarm + :param alarm_id: + :param alarm_dict: data dictionnary which contains the alarm data + :param typ: Read Intruction(0)/Measure(1) + ''' + + if typ == 0: #read instruction + alarm_status_bit = "{0:018b}".format(self.read_integer_instruction(alarm_id))[::-1] #str type + else: #read measure + alarm_status_bit = "{0:018b}".format(self.measure(alarm_id))[::-1] #str type + + #print("alarm_status_bit = ", alarm_status_bit) + + j = 0; + for i in alarm_dict: + alarm_dict[i] = alarm_status_bit[j] + j += 1 + return alarm_dict; + + def set_alarm(self, alarm_id, alarm_dict): + ''' + Set alarm instructions only + :param alarm_id: + :param alarm_dict: data dictionnary which contains the alarm data + ''' + data = [0x00, alarm_id] + value_int = 0 + cmp = 0 + for i in alarm_dict: + value_int += int(alarm_dict[i])*2**cmp + cmp += 1 + v = value_int.to_bytes(4, 'big') #U32 Conversion + data.append(v[0]);data.append(v[1]);data.append(v[2]);data.append(v[3])#send U32 one by one + return self.send_query(self._add, self.COMMAND_WRITE, data) + + #Watchdog + def read_watchdog_pd_pulse_photo(self): + res = self.send_query(self._add, self.COMMAND_READ_INSTRUCTIONS, [0x00, self.WATCHDOG_PD_PULSE_PHOTO]) + max_period = res[0:4] + min_period = res[4:8] + return int.from_bytes(max_period,byteorder = 'big', signed=False), int.from_bytes(min_period,byteorder = 'big', signed=False) + + #Alarm 
manadgement + def read_alarm_enable(self): + return self.read_alarm(self.ALARM_ACTIVATION, self.alarm_enable) + + def read_alarm_behavior1(self): + return self.read_alarm(self.INTERLOCK_IF_ALARM, self.alarm_behavior1) + + def read_alarm_behavior2(self): + return self.read_alarm(self.DISABLE_1S_IF_ALARM, self.alarm_behavior2) + + def read_alarm_history(self): + return self.read_alarm(self.ALARM_HISTORY, self.alarm_behavior2) + + def read_alarm_status(self): + return self.read_alarm(self.M_ALARM_ACTIVATED, self.alarm_status, 1) + + def set_alarm_enable(self): + return self.set_alarm(self.ALARM_ACTIVATION, self.alarm_enable) + + def set_alarm_behavior1(self): + return self.set_alarm(self.INTERLOCK_IF_ALARM, self.alarm_behavior1) + + def set_alarm_behavior2(self): + return self.set_alarm(self.DISABLE_1S_IF_ALARM, self.alarm_behavior2) + + def set_alarm_history(self): + return self.set_alarm(self.ALARM_HISTORY, self.alarm_behavior2) + + #Measure + def measure_main_v_mon(self): + return self.measure(self.M_MAIN_V_MON, 1) + + def measure_hk_v_mon(self): + return self.measure(self.M_HK_V_MON, 1) + + def measure_red_guide_v_mon(self): + return self.measure(self.M_RED_GUIDE_VMON, 1) + + def measure_smd1_t(self): + return self.measure(self.M_SMD1_T, 1) + + def measure_smd2_t(self): + return self.measure(self.M_SMD2_T, 1) + + def measure_case_t(self): + return self.measure(self.M_CASE_T, 1) + + def measure_smd1_current(self): + return self.measure(self.M_SMD1_CURRENT, 1) + + def measure_pd_out_power(self): + return self.measure(self.M_PD_OUT_POWER, 1) + + def measure_pd_bra_power(self): + return self.measure(self.M_PD_BRA_POWER, 1) + + def measure_pd_in_cw_power(self): + return self.measure(self.M_PD_IN_CW_POWER, 1) + + def measure_pd_inter_power(self): + return self.measure(self.M_PD_INTER_POWER, 1) + + def measure_pd_cri_power(self): + return self.measure(self.M_PD_CRI_POWER, 1) + + def measure_pd_in_pulse_frequency(self): + return self.measure(self.M_PD_IN_PULSE_FREQUENCY) + + def measure_ext_sync_freq(self): + return self.measure(self.M_EXTERNAL_SYNC_FREQUENCY) + + def measure_latched_interlocked_alarm(self): + return self.measure(self.M_LATCHED_INTERLOCKED_ALARMS) + + def measure_time_alarm_since_alarm_triggered(self): + table = self.send_query(self._add, self.COMMAND_READ_MEASURE, [0x00, self.M_TIME_SINCE_ALARM_TRIGGERED]) + cmp = 0 + for i in self.time_since_alarm_triggered: + self.time_since_alarm_triggered[i] = int.from_bytes(table[cmp:cmp+4], byteorder = 'big', signed=False) + cmp += 1 + return self.time_since_alarm_triggered + + def measure_pd_out_pulse_acc_time(self): + return self.measure(self.M_PD_OUT_PULSE_ACC_TIME, 1) + + def measure_pd_cri_pulse_acc_time(self): + return self.measure(self.M_PD_CRI_PULSE_ACC_TIME, 1) + + def measure_fsm_current_state(self): + return self.measure(self.M_FSM_CURRENT_STATE) + + def measure_analog_spare_kk0(self): + return self.measure(self.M_ANALOGIC_SPARE_KK0, 1) + + def measure_analog_spare_kk1(self): + return self.measure(self.M_ANALOGIC_SPARE_KK1, 1) + + + + + + + + + + + + + + + + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/mmd.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/mmd.py new file mode 100644 index 0000000..f118d65 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/mmd.py @@ -0,0 +1,98 @@ +#CCM +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Mmd(Aerodiode): + """ + Class to command one MMD 
Aerodiode's product + """ + VERSION = "1.1.0" + + #Instruct + INSTRUCT_LASER_MAX_CURRENT = 0x0A + INSTRUCT_DIODE_CURRENT = 0x0B + INSTRUCT_CURRENT_SOURCE = 0x0C + INSTRUCT_LASER_SLOPE = 0x0D + INSTRUCT_LASER_ACTIVATION = 0x0E + INSTRUCT_LASER_TEMPERATURE = 0x0F + INSTRUCT_DC_VOLTAGE = 0x11 + INSTRUCT_DC_VOLTAGE_MODE = 0x16 + INSTRUCT_APC_MODE = 0x17 + INSTRUCT_DC_MAX_VOLTAGE = 0x1D + INSTRUCT_TEC_MODE = 0x1F + INSTRUCT_FUNCTIONMENT_MODE = 0x22 + INSTRUCT_PULSE_FREQUENCY = 0x23 + INSTRUCT_PULSE_WIDTH = 0x24 + INSTRUCT_PULSE_LASER_MODE = 0x25 + INSTRUCT_PULSE_GATE_MODE = 0x26 + INSTRUCT_BURST_COUNT = 0x27 + + #Measure ID + M_LASER_CURRENT = 0x00 + M_LASER_DIODE_VOLTAGE = 0x01 + M_TEC_CURRENT = 0x02 + M_TEC_VOLTAGE = 0x03 + M_TEC_TEMPERATURE = 0x04 + M_CASE_TEMPERATURE = 0x05 + M_PD_EXT_1_INPUT = 0x06 + M_PD_EXT_2_INPUT = 0x07 + M_ALARM_STATE = 0x08 + + #ALARM + + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port,address) + + #Alarm Status + self.alarm_status = dict(EXT_Alarm = 0, VPWR_Alarm = 0, Diode_Volatge_Alarm = 0, Case_Max_T_Alarm = 0, + Laser_Max_T_Alarm = 0,CPU_Alarm = 0, PD_EXT1_Alarm = 0, Spare = 0) + + + + def measure_laser_current(self): + return self.measure(self.M_LASER_CURRENT, 1) + def measure_laser_diode_voltage(self): + return self.measure(self.M_LASER_DIODE_VOLTAGE, 1) + def measure_tec_current(self): + return self.measure(self.M_TEC_CURRENT, 1) + def measure_tec_volatge(self): + return self.measure(self.M_TEC_VOLTAGE, 1) + def measure_case_temperature(self): + return self.measure(self.M_CASE_TEMPERATURE, 1) + def measure_pd_ext1_input(self): + return self.measure(self.M_PD_EXT_1_INPUT, 1) + def measure_pd_ext2_input(self): + return self.measure(self.M_PD_EXT_2_INPUT, 1) + + def read_alarm_status(self): + alarm_status_bit = "{0:08b}".format(self.measure(self.M_ALARM_STATE)) #str type + print(alarm_status_bit) + alarm_status_bit = alarm_status_bit[::-1] + + if int(alarm_status_bit,2) == 0xFFFFFFC0: + self.alarm_status['Spare'] = 1 + + j = 0 + for i in self.alarm_status: + if i != 'Spare': + self.alarm_status[i] = int(alarm_status_bit[j],2) + j += 1 + + return self.alarm_status + + + + + + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3.py new file mode 100644 index 0000000..7aa55ba --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3.py @@ -0,0 +1,61 @@ +#pdmv3 +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Pdmv3(Aerodiode): + """ + Class to command one pdmv3 Aerodiode's product + """ + VERSION = "1.1.0" + + #Command + COMMAND_READ_CW_OR_PULSE = 0x20 + + #Instruction + INSTRUCT_SYNC_SOURCE = 0x0A + INSTRUCT_PULSE_SOURCE = 0x0B + INSTRUCT_FREQUENCY = 0x0C + INSTRUCT_PULSE_WIDTH = 0x0D + INSTRUCT_DELAY = 0x0E + INSTRUCT_OFFSET_CURRENT = 0x0F + INSTRUCT_CURRENT_PERCENT = 0x10 + INSTRUCT_TEMPERATURE = 0x11 + INSTRUCT_MAX_MEAN_CURRENT = 0x13 + INSTRUCT_MAX_PULSE_CURRENT = 0x14 + INSTRUCT_CURRENT_SOURCE = 0x15 + INSTRUCT_READ_INTERLOCK_STATUS = 0x1A + INSTRUCT_LASER_ACTIVATION = 0x1B + INSTRUCT_CW_PULSE_MODE = 0x1F + INSTRUCT_MODE_SELECTOR = 0x20 + + #xnn value + PULSE_MODE = 0x00 + CW_MODE = 0x01 + + HW = 0x00 + SW = 0x01 + + def __init__(self, port, address = -1): + """ + Open serial device. 
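`Central.read_alarm` and `Mmd.read_alarm_status` above share one idiom: format the packed alarm word as a fixed-width bit string, reverse it so bit 0 comes first, then assign one bit per dictionary key in declaration order. The same idiom as a standalone sketch (field names and the 8-bit width follow the Mmd alarm layout, spelling normalized):

```python
def unpack_alarm_bits(raw_value, field_names, width=8):
    """Decode a packed alarm word into {field: 0/1}, least-significant bit first."""
    bits = "{value:0{width}b}".format(value=raw_value, width=width)[::-1]
    return {name: int(bits[i]) for i, name in enumerate(field_names)}

mmd_fields = ["EXT_Alarm", "VPWR_Alarm", "Diode_Voltage_Alarm", "Case_Max_T_Alarm",
              "Laser_Max_T_Alarm", "CPU_Alarm", "PD_EXT1_Alarm", "Spare"]
print(unpack_alarm_bits(0b00000101, mmd_fields))  # EXT_Alarm and Diode_Voltage_Alarm set
```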
+ :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port,address) + + def read_cw_or_pulse(self): + """ + Read if the ccs configuration is pulsed or continue + return 0 if pulsed + 1 if continue + """ + data = self.send_query(self._add, self.COMMAND_READ_CW_OR_PULSE, data = []) + return data[1] + + + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3_cw.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3_cw.py new file mode 100644 index 0000000..f798789 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv3_cw.py @@ -0,0 +1,36 @@ +#pdmv3_cw +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Pdmv3_cw(Aerodiode): + """ + Class to command one pdmv3_cw Aerodiode's product + """ + VERSION = "1.1.0" + + #Command + COMMAND_READ_CW_OR_PULSE = 0x20 + + #Instruction + INSTRUCT_CURRENT_PERCENT = 0x10 + INSTRUCT_TEMPERATURE = 0x11 + INSTRUCT_MAX_MEAN_CURRENT = 0x13 + INSTRUCT_CURRENT_SOURCE = 0x15 + INSTRUCT_READ_INTERLOCK_STATUS = 0x1A + INSTRUCT_LASER_ACTIVATION = 0x1B + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port,address) + + + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5.py new file mode 100644 index 0000000..e4c8357 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5.py @@ -0,0 +1,195 @@ +#ccsv5 +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + + +class Pdmv5(Aerodiode): + """ + Class to command one Ccs CW Aerodiode's product + """ + #Command + VERSION = "1.1.0" + + COMMAND_READ_CW_OR_PULSE = 0x20 + + #Instruction + INSTRUCT_TEMPERATURE_SET_POINT = 0x16 + INSTRUCT_COMPLIANCE_VOLTAGE = 0x1E + INSTRUCT_MAX_AVERAGE_CURRENT = 0x1F + INSTRUCT_LASER_ACTIVATION = 0x20 + INSTRUCT_PEAK_CURRENT_SETPOINT_SRC = 0x29 + INSTRUCT_MAX_PEAK_CURRENT_SETPOINT = 0x2A + INSTRUCT_PEAK_CURRENT_SETPOINT = 0x2B + INSTRUCT_SYNC = 0x2C + INSTRUCT_SYNC_FREQUENCY = 0x2E + INSTRUCT_PULSE_WIDTH = 0x2F + INSTRUCT_PULSE_IN_THRESHOLD = 0x31 + INSTRUCT_CW_CURRENT_SETPOINT_SRC = 0x3D + INSTRUCT_CW_MAX_CURRENT = 0x3E + INSTRUCT_CURRENT_SETPOINT = 0x3F + INSTRUCT_FUNCTIONING_MODE = 0x41 + INSTRUCT_MODULATION_ACTIVATION = 0x46 + INSTRUCT_CURRENT_MODULATION_MAX = 0x47 + INSTRUCT_CURRENT_MODULATION_SETPOINT = 0x48 + INSTRUCT_INTERNAL_MODULATION_TYPE = 0x49 + INSTRUCT_MODULATION_FREQUENCY = 0x4A + INSTRUCT_GAIN_EXT_MODULATION = 0x4B + INSTRUCT_BFM_GAIN = 0x50 + INSTRUCT_PD_EXT_GAIN = 0x51 + INSTRUCT_BFM_CONVERSION = 0x53 + INSTRUCT_PD_EXT_CONVERSION = 0x54 + INSTRUCT_INTER_TIME_MEASURE_LIV = 0x5A + INSTRUCT_LIVE_MEASURE_COUNT_AVERAGE = 0x5B + INSTRUCT_LIV_CURRENT_STEP = 0x5C + INSTRUCT_LIV_CURRENT_MIN = 0x5D + INSTRUCT_LIV_CURRENT_MAX = 0x5E + INSTRUCT_LIV_PHOTODIODE = 0x5F + INSTRUCT_PULSE_WIDTH_LIV = 0x60 + INSTRUCT_VCOMP_POTAR = 0x6F + + #Measure + M_EXT_INTERLOCK_STATE = 0x02 + M_LIV_MEASURE = 0x03 + M_PEAK_CURRENT_SETPOINT = 0x0A + M_CW_CURRENT_SETPOINT = 0x0B + M_T_SETPOINT = 0x0C + M_T_DIODE = 0x14 + M_TEC_VREF = 0x15 + M_TEC_CURRENT = 0x16 + M_T_EXT_CTN = 0X17 + M_T_MOS = 0x18 + M_TEC_VOLTAGE = 0x19 + M_DIODE_VOLTAGE = 0x1E + M_CW_CURRENT = 0x1F 
+ M_AVERAGE_CURRENT = 0x20 + M_PULSED_CURRENT = 0x21 + M_BFM_CURRENT = 0x28 + M_PD_EXT_CURRENT = 0x29 + M_BFM_POWER = 0x2A + M_PD_EXT_POWER = 0x2B + M_ALARM_STATE = 0x46 + M_PICO_V_COMP = 0x36 + M_PICO_I_MON = 0x35 + M_VOLTAGE_IN = 0x3C + M_VOLTAGE_COMPLIANCE = 0x3D + M_VOLTAGE_5V = 0x3E + M_VOLTAGE_3V3 = 0x3F + M_VOLTAGE_2V5 = 0x40 + M_VOLTAGE_1V3 = 0x41 + M_VOLTAGE_NEG_5V = 0x42 + + + #XNN Value + INTERNAL = 0x00 + POTENTIOMETER = 0x01 + EXTERNAL = 0x02 + + BFM = 0x00 + PD_EXT = 0x01 + + ACC = 0x00 + APC = 0x01 + + SINUS = 0x01 + TRIANGLE = 0x02 + SQUARED = 0x03 + + + + + + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port, address) + + #Alarm status (dict type) + self.alarm_status = dict(INTERLOCK = 0, PWR = 0, DIODE_TEMPERATURE = 0, TEMPERATURE_MOS = 0, + TEC_EXT = 0, BNC_INTERLOCK = 0, EXT_INTERLOCK = 0, INTERLOCK_KEY = 0, MOS_CW = 0, RESERVED = 0, OPEN_CICUIT = 0) + + def read_cw_or_pulse(self): + self.send_query(self._add, self.COMMAND_READ_CW_OR_PULSE, data = []) + + def set_vcomp_patch(self, instruction, value = 0): + return self.set_integer_instruction(instruction, value) + + def measure_interlock(self): + return self.measure(self.M_EXT_INTERLOCK_STATE, 0) + + def measure_liv_measure(self): + return self.measure(self.M_LIV_MEASURE, 0) + + def measure_peak_current_setpoint(self): + return self.measure(self.M_PEAK_CURRENT_SETPOINT, 1) + + def measure_cw_current_setpoint(self): + return self.measure(self.M_PEAK_CURRENT_SETPOINT, 1) + + def measure_temperature_setpoint(self): + return self.measure(self.M_T_SETPOINT, 1) + + def measure_temperature_diode(self): + return self.measure(self.M_T_DIODE , 1) + + def measure_tec_vref(self): + return self.measure(self.M_TEC_VREF , 1) + + def measure_tec_current(self): + return self.measure(self.M_TEC_CURRENT, 1) + + def measure_diode_voltage(self): + return self.measure(self.M_DIODE_VOLTAGE , 1) + + def measure_cw_current(self): + return self.measure(self.M_CW_CURRENT , 1) + + def measure_average_current(self): + return self.measure(self.M_AVERAGE_CURRENT , 1) + + def measure_pulsed_current(self): + return self.measure(self.M_PULSED_CURRENT , 1) + + def measure_bfm_current(self): + return self.measure(self.M_BFM_CURRENT, 1) + + def measure_pd_ext_current(self): + return self.measure(self.M_PD_EXT_CURRENT, 1) + + def measure_voltage_5v(self): + return self.measure(self.M_VOLTAGE_5V, 1) + + def measure_voltage_3v3(self): + return self.measure(self.M_VOLTAGE_3V3, 1) + + def measure_voltage_2v5(self): + return self.measure(self.M_VOLTAGE_2V5, 1) + + def measure_voltage_1v3(self): + return self.measure(self.M_VOLTAGE_1V3, 1) + + def measure_vcomp(self): + return self.measure(self.M_PICO_V_COMP, 1) + + def read_alarm_status(self): + alarm_status_bit = "{0:011b}".format(self.measure(self.M_ALARM_STATE)) #str type + alarm_status_bit = alarm_status_bit[::-1] + if int(alarm_status_bit,2) == 0xFFFFFFC0: + self.alarm_status['Spare'] = 1 + + j = 0 + for i in self.alarm_status: + if i != 'Spare': + self.alarm_status[i] = int(alarm_status_bit[j],2) + j += 1 + + return self.alarm_status + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5_cw.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5_cw.py new file mode 100644 index 0000000..0d8fed7 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/pdmv5_cw.py @@ -0,0 
+1,156 @@ +#ccsv5 + + +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + + +class Pdmv5_cw(Aerodiode): + """ + Class to command one Ccs CW Aerodiode's product + """ + VERSION = "1.1.0" + + #Command + COMMAND_READ_CW_OR_PULSE = 0x20 + + #Instruction + INSTRUCT_TEMPERATURE_SET_POINT = 0x16 + INSTRUCT_MAX_AVERAGE_CURRENT = 0x1F + INSTRUCT_LASER_ACTIVATION = 0x20 + INSTRUCT_PEAK_CURRENT_SETPOINT_SRC = 0x29 + INSTRUCT_MAX_PEAK_CURRENT_SETPOINT = 0x2A + INSTRUCT_PEAK_CURRENT_SETPOINT = 0x2B + INSTRUCT_SYNC = 0x2C + INSTRUCT_SYNC_FREQUENCY = 0x2E + INSTRUCT_CW_CURRENT_SETPOINT_SRC = 0x3D + INSTRUCT_CW_MAX_CURRENT = 0x3E + INSTRUCT_CURRENT_SETPOINT = 0x3F + INSTRUCT_FUNCTIONING_MODE = 0x41 + INSTRUCT_MODULATION_ACTIVATION = 0x46 + INSTRUCT_CURRENT_MODULATION_MAX = 0x47 + INSTRUCT_CURRENT_MODULATION_SETPOINT = 0x48 + INSTRUCT_INTERNAL_MODULATION_TYPE = 0x49 + INSTRUCT_MODULATION_FREQUENCY = 0x4A + INSTRUCT_GAIN_EXT_MODULATION = 0x4B + INSTRUCT_BFM_GAIN = 0x50 + INSTRUCT_PD_EXT_GAIN = 0x51 + + #Measure + M_EXT_INTERLOCK_STATE = 0x02 + M_LIV_MEASURE = 0x03 + M_PEAK_CURRENT_SETPOINT = 0x0A + M_CW_CURRENT_SETPOINT = 0x0B + M_T_SETPOINT = 0x0C + M_T_DIODE = 0x14 + M_TEC_VREF = 0x15 + M_TEC_CURRENT = 0x16 + M_T_EXT_CTN = 0X17 + M_T_MOS = 0x18 + M_TEC_VOLTAGE = 0x19 + M_DIODE_VOLTAGE = 0x1E + M_CW_CURRENT = 0x1F + M_AVERAGE_CURRENT = 0x20 + M_PULSED_CURRENT = 0x21 + M_BFM_CURRENT = 0x28 + M_PD_EXT_CURRENT = 0x29 + M_ALARM_STATE = 0x46 + M_VOLTAGE_IN = 0x3C + M_VOLTAGE_COMPLIANCE = 0x3D + M_VOLTAGE_5V = 0x3E + M_VOLTAGE_3V3 = 0x3F + M_VOLTAGE_2V5 = 0x40 + M_VOLTAGE_1V3 = 0x41 + M_VOLTAGE_NEG_5V = 0x42 + + + + + + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. 
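The `Pdmv5` class above (and the CW variant whose constants start here) is mostly thin wrappers over `measure` and the typed instruction setters, so a session reduces to open, query, inspect alarms. A hedged sketch, assuming the module path `aerodiode.pdmv5` and a device on `COM3` at the default address:

```python
from aerodiode.pdmv5 import Pdmv5  # module path assumed from the package layout above

def quick_status(port="COM3"):
    """Open a PDM v5 driver and return a few monitor readings plus decoded alarms."""
    dev = Pdmv5(port)                                   # default address (-1)
    return {
        "diode_temperature": dev.measure_temperature_diode(),
        "tec_current": dev.measure_tec_current(),
        "diode_voltage": dev.measure_diode_voltage(),
        "alarms": dev.read_alarm_status(),              # packed alarm word decoded to a dict
    }
```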
+ """ + #Connection + Aerodiode.__init__(self,port, address) + + #Alarm status (dict type) + self.alarm_status = dict(INTERLOCK = 0, PWR = 0, DIODE_TEMPERATURE = 0, TEMPERATURE_MOS = 0, + TEC_EXT = 0, BNC_INTERLOCK = 0, EXT_INTERLOCK = 0, INTERLOCK_KEY = 0, MOS_CW = 0, RESERVED = 0, OPEN_CICUIT = 0) + + def read_cw_or_pulse(self): + self.send_query(self._add, self.COMMAND_READ_CW_OR_PULSE, data = []) + + def measure_interlock(self): + return self.measure(self.M_EXT_INTERLOCK_STATE, 0) + + def measure_liv_measure(self): + return self.measure(self.M_LIV_MEASURE, 0) + + def measure_peak_current_setpoint(self): + return self.measure(self.M_PEAK_CURRENT_SETPOINT, 1) + + def measure_cw_current_setpoint(self): + return self.measure(self.M_PEAK_CURRENT_SETPOINT, 1) + + def measure_temperature_setpoint(self): + return self.measure(self.M_T_SETPOINT, 1) + + def measure_temperature_diode(self): + return self.measure(self.M_T_DIODE , 1) + + def measure_tec_vref(self): + return self.measure(self.M_TEC_VREF , 1) + + def measure_tec_current(self): + return self.measure(self.M_TEC_CURRENT, 1) + + def measure_diode_voltage(self): + return self.measure(self.M_DIODE_VOLTAGE , 1) + + def measure_cw_current(self): + return self.measure(self.M_CW_CURRENT , 1) + + def measure_average_current(self): + return self.measure(self.M_AVERAGE_CURRENT , 1) + + def measure_pulsed_current(self): + return self.measure(self.M_PULSED_CURRENT , 1) + + def measure_bfm_current(self): + return self.measure(self.M_BFM_CURRENT, 1) + + def measure_pd_ext_current(self): + return self.measure(self.M_PD_EXT_CURRENT, 1) + + def measure_voltage_5v(self): + return self.measure(self.M_VOLTAGE_5V, 1) + + def measure_voltage_3v3(self): + return self.measure(self.M_VOLTAGE_3V3, 1) + + def measure_voltage_2v5(self): + return self.measure(self.M_VOLTAGE_2V5, 1) + + def measure_voltage_1v3(self): + return self.measure(self.M_VOLTAGE_1V3, 1) + + def read_alarm_status(self): + alarm_status_bit = "{0:010b}".format(self.measure(self.M_ALARM_STATE)) #str type + alarm_status_bit = alarm_status_bit[::-1] + if int(alarm_status_bit,2) == 0xFFFFFFC0: + self.alarm_status['Spare'] = 1 + + j = 0 + for i in self.alarm_status: + if i != 'Spare': + self.alarm_status[i] = int(alarm_status_bit[j],2) + j += 1 + + return self.alarm_status + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/shaper.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/shaper.py new file mode 100644 index 0000000..44e817d --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/shaper.py @@ -0,0 +1,413 @@ +#Shaper +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Shaper(Aerodiode): + """ + Class to command one Shaper Aerodiode's product + """ + VERSION = "1.2.0" + + #SHAPE---------------------------------------------- + SHAPE1 = 0x00 + SHAPE2 = 0x01 + SHAPE3 = 0x02 + SHAPE4 = 0x03 + #Command-------------------------------------------- + COMMAND_SEND_SHAPE_DATA = 0x16 + COMMAND_SAVE_SHAPE_DATA = 0x17 + COMMAND_SET_INTER_SHAPE_TIME = 0x25 + COMMAND_PLAY_SHAPE = 0x26 + #Instruction--------------------------------------- + INSTRUCT_LASER_ACTIVATION = 0x0A + INSTRUCT_INTERSHAPE_DELAY_MODE = 0x0D + INSTRUCT_LASER_TEMPERATURE = 0x0E + INSTRUCT_MIN_TIME_BETWEEN_TWO_SHAPE = 0x0F + INSTRUCT_SHAPE_DEFAULT_OUTPUT_VALUE = 0x11 + INSTRUCT_OFFSET_CURRENT = 0x14 + INSTRUCT_CURRENT = 0x15 + INSTRUCT_MAX_VOLTAGE_MODULATION = 0x16 + INSTRUCT_PEAK_CURRENT_MODULATION = 0x19 + + 
#Instruction for PDM mode + INSTRUCT_PDM_OFFSET_CURRENT = 0x14 + INSTRUCT_PDM_SETPOINT_CURRENT = 0x15 + INSTRUCT_PDM_PULSE_MODE = 0x17 + INSTRUCT_PDM_MAX_PEAK_CURRENT = 0x18 + + #Instruction : TRIGGER + INSTRUCT_TRIGGER_CLOK = 0x1D + INSTRUCT_TRIGGER_SYNCHRO_SOURCE_A = 0x1E + INSTRUCT_TRIGGER_SYNCHRO_SOURCE_B = 0x1F + INSTRUCT_TRIGGER_SEQUENCE_SYNC_A = 0x20 + INSTRUCT_TRIGGER_SEQUENCE_SYNC_B = 0x21 + INSTRUCT_DELAY_SYNC_A_SYNC_B = 0x22 + INSTRUCT_INTERNAL_SYNC_FREQUENCY_A = 0x23 + #Instruction : Shape 1----------------------------- + INSTRUCT_SHAPE1_SYNC_FB_DELAY = 0x24 + INSTRUCT_SHAPE1_SYNC_FB_PULSE_WIDTH = 0x25 + INSTRUCT_SHAPE1_TRIG_OUT1_DELAY = 0x26 + INSTRUCT_SHAPE1_TRIG_OUT1_PULSE_WIDTH = 0x27 + INSTRUCT_SHAPE1_TRIG_OUT2_DELAY = 0x28 + INSTRUCT_SHAPE1_TRIG_OUT2_PULSE_WIDTH = 0x29 + INSTRUCT_SHAPE1_TRIG_OUT3_DELAY = 0x2A + INSTRUCT_SHAPE1_TRIG_OUT3_PULSE_WIDTH = 0x2B + INSTRUCT_SHAPE1_PDM_PULSE_DELAY = 0x2C + INSTRUCT_SHAPE1_PDM_PULSE_PULSE_WIDTH = 0x2D + INSTRUCT_SHAPE1_DELAY = 0x2E + + INSTRUCT_SHAPE1_STEP_COUT = 0x5A + INSTRUCT_SHAPE1_STEP_SIZE = 0x5B + INSTRUCT_SHAPE1_TRIGGER_STEP_SIZE = 0x5C + INSTRUCT_SHAPE1_INTERNSHAPE_OFFSET_TIME = 0x5D + INSTRUCT_SHAPE1_MAX_INTERNSHAPE_TIME_VALUE = 0x5E + #Instruction : Shape 2----------------------------- + INSTRUCT_SHAPE2_SYNC_FB_DELAY = 0x2F + INSTRUCT_SHAPE2_SYNC_FB_PULSE_WIDTH = 0x30 + INSTRUCT_SHAPE2_TRIG_OUT1_DELAY = 0x31 + INSTRUCT_SHAPE2_TRIG_OUT1_PULSE_WIDTH = 0x32 + INSTRUCT_SHAPE2_TRIG_OUT2_DELAY = 0x33 + INSTRUCT_SHAPE2_TRIG_OUT2_PULSE_WIDTH = 0x34 + INSTRUCT_SHAPE2_TRIG_OUT3_DELAY = 0x35 + INSTRUCT_SHAPE2_TRIG_OUT3_PULSE_WIDTH = 0x36 + INSTRUCT_SHAPE2_PDM_PULSE_DELAY = 0x37 + INSTRUCT_SHAPE2_PDM_PULSE_PULSE_WIDTH = 0x38 + INSTRUCT_SHAPE2_DELAY = 0x39 + + INSTRUCT_SHAPE2_STEP_COUT = 0x5F + INSTRUCT_SHAPE2_STEP_SIZE = 0x60 + INSTRUCT_SHAPE2_TRIGGER_STEP_SIZE = 0x61 + INSTRUCT_SHAPE2_INTERNSHAPE_OFFSET_TIME = 0x62 + INSTRUCT_SHAPE2_MAX_INTERNSHAPE_TIME_VALUE = 0x63 + #Instruction : Shape 3----------------------------- + INSTRUCT_SHAPE3_SYNC_FB_DELAY = 0x3A + INSTRUCT_SHAPE3_SYNC_FB_PULSE_WIDTH = 0x3B + INSTRUCT_SHAPE3_TRIG_OUT1_DELAY = 0x3C + INSTRUCT_SHAPE3_TRIG_OUT1_PULSE_WIDTH = 0x3D + INSTRUCT_SHAPE3_TRIG_OUT2_DELAY = 0x3E + INSTRUCT_SHAPE3_TRIG_OUT2_PULSE_WIDTH = 0x3F + INSTRUCT_SHAPE3_TRIG_OUT3_DELAY = 0x40 + INSTRUCT_SHAPE3_TRIG_OUT3_PULSE_WIDTH = 0x41 + INSTRUCT_SHAPE3_PDM_PULSE_DELAY = 0x42 + INSTRUCT_SHAPE3_PDM_PULSE_PULSE_WIDTH = 0x43 + INSTRUCT_SHAPE3_DELAY = 0x44 + + INSTRUCT_SHAPE3_STEP_COUT = 0x64 + INSTRUCT_SHAPE3_STEP_SIZE = 0x65 + INSTRUCT_SHAPE3_TRIGGER_STEP_SIZE = 0x66 + INSTRUCT_SHAPE3_INTERNSHAPE_OFFSET_TIME = 0x67 + INSTRUCT_SHAPE3_MAX_INTERNSHAPE_TIME_VALUE = 0x68 + INSTRUCT_SHAPE1_MAX_INTERNSHAPE_TIME_VALUE = 0x5E + #Instruction : Shape 4----------------------------- + INSTRUCT_SHAPE4_SYNC_FB_DELAY = 0x45 + INSTRUCT_SHAPE4_SYNC_FB_PULSE_WIDTH = 0x46 + INSTRUCT_SHAPE4_TRIG_OUT1_DELAY = 0x47 + INSTRUCT_SHAPE4_TRIG_OUT1_PULSE_WIDTH = 0x48 + INSTRUCT_SHAPE4_TRIG_OUT2_DELAY = 0x49 + INSTRUCT_SHAPE4_TRIG_OUT2_PULSE_WIDTH = 0x4A + INSTRUCT_SHAPE4_TRIG_OUT3_DELAY = 0x4B + INSTRUCT_SHAPE4_TRIG_OUT3_PULSE_WIDTH = 0x4C + INSTRUCT_SHAPE4_PDM_PULSE_DELAY = 0x4D + INSTRUCT_SHAPE4_PDM_PULSE_PULSE_WIDTH = 0x4E + INSTRUCT_SHAPE4_DELAY = 0x4F + + INSTRUCT_SHAPE4_STEP_COUT = 0x69 + INSTRUCT_SHAPE4_STEP_SIZE = 0x6A + INSTRUCT_SHAPE4_TRIGGER_STEP_SIZE = 0x6B + INSTRUCT_SHAPE4_INTERNSHAPE_OFFSET_TIME = 0x6C + INSTRUCT_SHAPE4_MAX_INTERNSHAPE_TIME_VALUE = 0x6D + + INSTRUCT_SUPER_SHAPE = 0x6E + + #INSTRUCTION 
GAINSWITCH------------------------------ + INSTRUCT_GAINSWITCH_CORRECTION_MODE = 0x82 + INSTRUCT_GAINSWITCH_CORRECTION_RAMP_AMPLITUDE = 0x83 + INSTRUCT_GAINSWITCH_CORRECTION_RAMP_DURATION = 0x84 + + #INSTRUCTION ALARMS---------------------------------- + INSTRUCT_ALARMS_ENABLE = 0xC8 + INSTRUCT_ALARMS_BEHAVIORS = 0xC9 + INSTRUCT_ALARMS_BEHAVIORS2 = 0xCD + + #MEASURE ID------------------------------------------ + MAIN_VOLTAGE = 0x00 + LASER_DIODE_TEMPERATURE = 0x01 + ALARM_STATE = 0x02 + AVERAGE_OPTICAL_POWER = 0x03 + ALARM_TRIGGERED_INTERLOCK = 0X04 + MOS_MPDM_TEMPERATURE = 0x06 + + #XNNVALUE------------------------------------------- + VALUE_LASER_OFF = 0x00 + VALUE_LASER_ON = 0x01 + + SYNC_EXT0 = 0x00 + SYNC_EXT1 = 0x01 + SYNC_EXTKK = 0x02 + SYNC_INTERNAL = 0x03 + SYNC_NONE = 0x04 + + ALARM_NOT_ENABLE = 0x00 + ALARM_ENABLE = 0x01 + + GAINSWITCH_CORRECTION_MODE_ON = 0x01 + GAINSWITCH_CORRECTION_MODE_OFF = 0x00 + + INTERSHAPE_DELAY_MODE_ANALOG = 0x00 + INTERSHAPE_DELAY_MODE_TRIG_SOFT = 0x01 + + TRIGGER_CLOCK_INT = 0x00 + TRIGGER_CLOCK_EXT = 0x01 + + #Methods + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port, address) + + #Alarm (dictionnary type) + self.alarm_enable = dict(laser_temperature = 0, MPDM_MOS_temperature = 0, reserved1 = 0, reserved2 = 0, + diode_average_power = 1,diode_average_current = 1, main_voltage = 1, aux_off = 1) + + self.alarm_behavior = dict(laser_temperature = 0, MPDM_MOS_temperature = 0, reserved1 = 0, reserved2 = 0, + diode_average_power = 0,diode_average_current = 0, main_voltage = 0, aux_off = 0) + + self.alarm_behavior2 = dict(laser_temperature = 0, MPDM_MOS_temperature = 0, reserved1 = 0, reserved2 = 0, + diode_average_power = 0,diode_average_current = 0, main_voltage = 0, aux_off = 0) + + self.alarm_state = dict(laser_temperature = 0, MPDM_MOS_temperature = 0, reserved1 = 0, reserved2 = 0, + diode_average_power = 0,diode_average_current = 0, main_voltage = 0, aux_off = 0) + + self.read_alarm_enable(); self.read_alarm_behavior(); self.read_alarm_behavior2() #set the alarm dicionnary with the MCU's value + + #Sequence (dictionnary type) + self.sequence_a = dict(LAST_VALID_ID = 0, ID8 = 0, ID7 = 0, ID6 = 0, ID5 = 0, ID4 = 0, ID3 = 0, ID2 = 0, ID1 = 0) + self.sequence_b = dict(LAST_VALID_ID = 0, ID8 = 0, ID7 = 0, ID6 = 0, ID5 = 0, ID4 = 0, ID3 = 0, ID2 = 0, ID1 = 0) + + + def read_csv(self, csv_file): + """ + Convert a csv file in list.. + :param csv_file: file path of your csv file + :return: a table from your csv file + """ + table_csv = [] + with open(csv_file) as file: #open csv as file variable + read = csv.reader(file) #read file variable + print('', end='\n') + for line in read: #read line by line + #convert a list of string to int + int_line = "".join(line) #step 1 convert the list in a string + int_line = int(int_line) #step 2 convert in int + table_csv.append(int_line) #add to the end of the table + del table_csv[0:2] #delete the 2 first data (length + '0') + return table_csv + + def send_csv(self, shape_id, table_csv): + """ + Send a csv_table to the Shaper. 
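The `send_csv` body that follows streams a shape table to the device in fixed-size packets: each 16-bit sample is split into a high and a low byte, and a two-byte sample offset tells the firmware where the packet lands in the table. The framing in isolation, as a sketch that assumes the 60-samples-per-packet size used by the code:

```python
def frame_shape_packets(samples, shape_id=0x00, chunk=60):
    """Split a shape table into [shape_id, offset_hi, offset_lo, byte, byte, ...] packets."""
    packets = []
    for start in range(0, len(samples), chunk):
        payload = []
        for value in samples[start:start + chunk]:
            payload.append(value // 0x100)              # high byte of the 16-bit sample
            payload.append(value % 0x100)               # low byte
        packets.append([shape_id, (start >> 8) & 0xFF, start & 0xFF] + payload)
    return packets

# 150 dummy samples -> packets carrying 60, 60 and 30 samples
print([len(p) for p in frame_shape_packets(list(range(150)))])
```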
+ :param shape id: bytes from 0x00 to 0x03 (4 shapes available) + :param shape table_csv: table of integer + :no return + """ + + if (len(table_csv)>4000): + raise ValueError('data must be inferior to 4000.') + + csv_output = [table_csv[i:i +60] for i in range(0, len(table_csv), 60)] #tableau 2D contenant des tableaux 1D qui sont les découpes de table_csv par pacquet de 50. + for i in range(0, len(csv_output)): + start = [0x00, 0x00] #Always start at the offset 0 which is declared on two bytes + + if (i>0): + length = (0x3C)*i #=(50)*i because the card receive the data 50 by 50 values + if (length < 0xFF):#verify if the new offset could be represented on one or two bytes + start[0] = length&0xFF + else: + start[0] = length&0xFF #first byte of the offset + start[1] = (length>>8)&0xFF #second byte of the offset + + csv_send = [] + for j in range(0,len(csv_output[i])): + value1 = csv_output[i][j]//(0x100) + value0 = csv_output[i][j] - (csv_output[i][j]//0x100)*0x100 + #value 1 and 0 enable to represent each data on two bytes + csv_send.append(value1) + csv_send.append(value0) + data = [shape_id, start[1], start[0]] + csv_send #concatenate the data + self.send_query(self._add, self.COMMAND_SEND_SHAPE_DATA, data) #send the csv_send + + + def play_shape(self, shape_id): + return self.send_query(self._add, self.COMMAND_PLAY_SHAPE, bytearray([shape_id])) + + def save_shape_data(self): + return self.send_query(self._add, self.COMMAND_SAVE_SHAPE_DATA) + + def set_inter_shape_time(self, shapeid, time): + data = [shapeid] + data_value = struct.pack('>f',time) + + for i in range(0, len(data_value)): + data.append(data_value[i]) + + return self.send_query(self._add, self.COMMAND_SET_INTER_SHAPE_TIME, data) + + def read_alarm(self, alarm_id, alarm_dict, typ = 0): + ''' + Read alarm + :param alarm_id: + :param alarm_dict: data dictionnary which contains the alarm data + :param typ: Read Intruction(0)/Measure(1) + ''' + + if typ == 0: #read instruction + alarm_status_bit = "{0:08b}".format(self.read_integer_instruction(alarm_id))[::-1] #str type on 8 bit + else: #read measure + alarm_status_bit = "{0:08b}".format(self.measure(alarm_id))[::-1] #str type + + j = 0; + for i in alarm_dict: + alarm_dict[i] = alarm_status_bit[j] + j += 1 + + return alarm_dict; + + def set_alarm(self, alarm_id, alarm_dict): + ''' + Set alarm instructions only + :param alarm_id: + :param alarm_dict: data dictionnary which contains the alarm data + ''' + data = [0x00, alarm_id] + value_int = 0 + cmp = 0 + for i in alarm_dict: + value_int += int(alarm_dict[i])*2**cmp + cmp += 1 + data.append(value_int) + return self.send_query(self._add, self.COMMAND_WRITE, data) + + def read_alarm_enable(self): + ''' + set alarm enable status from the MCU to the software + :return alarm_enable dictionnary such as 1 = ON and 0 = OFF + ''' + return self.read_alarm(self.INSTRUCT_ALARMS_ENABLE, self.alarm_enable) + + def set_alarm_enable(self): + ''' + set alarm enable status from the software to the MCU + :return alarm_enable dictionnary such as 1 = ON and 0 = OFF + ''' + return self.set_alarm(self.INSTRUCT_ALARMS_ENABLE, self.alarm_enable) + + def read_alarm_behavior(self): + ''' + set alarm behavior status from the MCU to the software + :return alarm_behavior dictionnary such as 1 = ON and 0 = OFF + ''' + return self.read_alarm(self.INSTRUCT_ALARMS_BEHAVIORS, self.alarm_behavior) + + def set_alarm_behavior(self): + ''' + set alarm behavior status from the software to the MCU + :return alarm_behavior dictionnary such as 1 = ON and 0 = OFF + ''' + 
return self.set_alarm(self.INSTRUCT_ALARMS_BEHAVIORS, self.alarm_behavior) + + + def read_alarm_behavior2(self): + ''' + set alarm behavior2 status from the MCU to the software + :return alarm_behavior2 dictionnary such as 1 = ON and 0 = OFF + ''' + return self.read_alarm(self.INSTRUCT_ALARMS_BEHAVIORS2, self.alarm_behavior2) + + def read_alarm_state(self): + ''' + set alarm status from the MCU to the software + :return alarm_status dictionnary such as 1 = ON and 0 = OFF + ''' + return self.read_alarm(self.ALARM_STATE, self.alarm_state, 1) + + def read_sequence_shape(self, sequence_id, sequence_dict): + """ + Set the value from the MCU's sequence to the sequence_dict such as sequence_a or sequence_b + :param sequence_id: to indentify the sequence to change such as INSTRUCT_TRIGGER_SEQUENCE_SYNC_A or INSTRUCT_TRIGGER_SEQUENCE_SYNC_B + :param sequence_dict: sequence dictionnary sequence_a or sequence_b + :no return + """ + sequence_in_bit = "{0:028b}".format(self.read_integer_instruction(self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_A)) #conversion of an integer to a string of bits of length 28 + sequence_dict['LAST_VALID_ID'] = int(sequence_in_bit[0:4],2) #read the length on 4 bit, conversion bit string to int + j = 4 #start to read at four the sequence because the length is already set + for i in sequence_dict: + if i != 'LAST_VALID_ID' : + sequence_dict[i] = int(sequence_in_bit[j:j+3],2) #each id are written on 3 bits, conversion bit string to int + j += 3 + + return sequence_dict + + def set_sequence_shape(self, sequence_id, sequence_dict): + """ + Set the value modified in the sequence dictionnary such as sequence_a or sequence_b to the MCU + :param sequence_id: to indentify the sequence to change such as INSTRUCT_TRIGGER_SEQUENCE_SYNC_A or INSTRUCT_TRIGGER_SEQUENCE_SYNC_B + :param sequence_dict: sequence dictionnary sequence_a or sequence_b + :no return + conversion sequence dictionary -> bit string -> int -> bytes (U32 format) + """ + + sequence_in_bit_table = [] #table collecting the contents of the sequence dictionnary + for i in sequence_dict: + if i == 'LAST_VALID_ID' : + sequence_in_bit_table.append("{0:04b}".format(sequence_dict[i])) #length is written on 4 bits + else: + sequence_in_bit_table.append("{0:03b}".format(sequence_dict[i])) #id 1 to 8 are written on 3 bits + sequence_in_bit_str = "".join(sequence_in_bit_table) + sequence_in_int = int(sequence_in_bit_str, 2) + sequence_in_bytes = sequence_in_int.to_bytes(4, 'big') + value3 = sequence_in_bytes[3] #value3 of the data send + value2 = sequence_in_bytes[2] + value1 = sequence_in_bytes[1] + value0 = sequence_in_bytes[0] + + data = [0x00, self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_A] + data += [value0, value1, value2, value3] #concatenate the instructions to send + + return self.send_query(self._add, self.COMMAND_WRITE, data) + + def read_sequence_a(self): + return self.read_sequence_shape(self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_A, self.sequence_a) + + def set_sequence_a(self): + return self.set_sequence_shape(self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_B, self.sequence_a) + + def read_sequence_b(self): + return self.read_sequence_shape(self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_A, self.sequence_a) + + def set_sequence_b(self): + return self.set_sequence_shape(self.INSTRUCT_TRIGGER_SEQUENCE_SYNC_B, self.sequence_a) + + def measure_main_voltage(self): + return self.measure(self.MAIN_VOLTAGE, typ = 1) + + def measure_laser_diode_temperature(self): + return self.measure(self.LASER_DIODE_TEMPERATURE, typ = 1) + + def measure_alarm_state(self): + return 
self.measure(self.ALARM_STATE, typ = 0) + + def measure_average_optical_power(self): + return self.measure(self.AVERAGE_OPTICAL_POWER, typ = 1) + + def measure_alarm_triggered_interlock(self): + return self.measure(self.ALARM_TRIGGERED_INTERLOCK, typ = 0) + + def measure_mos_mpdm_temperature(self): + return self.measure(self.MOS_MPDM_TEMPERATURE, typ = 1) + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/tombak.py b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/tombak.py new file mode 100644 index 0000000..df645ff --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/build/lib/aerodiode/tombak.py @@ -0,0 +1,188 @@ +''' +Classe TOMBAK +''' +import serial +from enum import Enum +from aerodiode import Aerodiode +import csv +import struct + +class Tombak(Aerodiode): + + VERSION = "1.1.0" + #Command-------------------------------------------- + COMMAND_WRITE_ADDRESS = 0x00 + COMMAND_READ_EQUIPMENT = 0x01 + COMMAND_WRITE_SHAPE_VALUE = 0x16 + COMMAND_SAVE_SHAPE = 0x17 + COMMAND_SOFT_TRIGG = 0x18 + COMMAND_ENABLE_RESYNC = 0x19 + + #Instructions--------------------------------------- + INSTRUCT_FUNCTIONING_MODE = 0x0A + INSTRUCT_PULSE_IN_THRESHOLD = 0x0B + INSTRUCT_PULSE_IN_DELAY = 0x0C + INSTRUCT_PULSE_IN_SRC = 0x0D + INSTRUCT_PULSE_IN_FREQUENCY_DIV_32 = 0x0F + INSTRUCT_PULSE_OUT_DELAY = 0x10 + INSTRUCT_PULSE_OUT_WIDTH = 0x11 + INSTRUCT_PULSE_BURST_SIZE = 0x12 + INSTRUCT_TRIGGER_SRC = 0x13 + INSTRUCT_INTERN_TRIGGER_FREQ = 0x14 + INSTRUCT_SYNC_OUT_1_SRC = 0x15 + INSTRUCT_GATE_CONTROL = 0x16 + INSTRUCT_SYNC_OUT_2_SOURCE = 0x17 + INSTRUCT_PULSE_OUT_INVERSION = 0x18 + INSTRUCT_FINE_DELAY_ACTIVATION = 0x1B + INSTRUCT_EXTERNAL_GATE_SOURCE = 0x1C + INSTRUCT_RESYNC_DIVISION_32 = 0x1D + INSTRUCT_SHAPE1_STEP_NUMBER = 0x1E + INSTRUCT_SHAPE1_STEP_SIZE = 0x1F + INSTRUCT_SHAPE2_STEP_NUMBER = 0x20 + INSTRUCT_SHAPE2_STEP_SIZE = 0x21 + INSTRUCT_SHAPE3_STEP_NUMBER = 0x22 + INSTRUCT_SHAPE3_STEP_SIZE = 0x23 + INSTRUCT_SHAPE4_STEP_NUMBER = 0x24 + INSTRUCT_SHAPE4_STEP_SIZE = 0x25 + INSTRUCT_DEFAULT_VALUE = 0x26 + INSTRUCT_CLK_EXT_ACTIVATION = 0x27 + INSTRUCT_PULSE_IN_FREQUENCY_DIV_64 = 0x28 + INSTRUCT_RESYNC_DIVISION_64 = 0x29 + + #Measure ID--------------------------------------- + M_PULSE_IN_FREQ = 0x00 + SYNC_EXT_FREQ = 0x01 + + #XNN Value---------------------------------------- + DIVIDER = 1 + PULSE_PICKER = 2 + PULSE_GENERATOR = 3 + PULSE_SHAPE_DIVIDER = 4 + PULSE_SHAPE_PICKER = 5 + PULSE_SHAPE_GENERATOR = 6 + HIGH = 7 + + DIRECT = 0 + DAISY_SYNC_IN = 1 + + INT = 0 + EXT = 1 + + SYNC = 0 + TRIGGER = 1 + DELAY = 2 + PULSE_OUT = 3 + + NO_GATE = 0 + GATE = 1 + BURST_GATE = 2 + BURST_SERIAL = 3 + + PULSE_DIRECT = 0 + NULL = 1 + + POSITIVE_LOGIC = 0 + NEGATIVE_LOGIC = 1 + + PULSE_FREQ = 200000000 + + CLK_EXT_ON = 1 + CLK_EXT_OFF = 0 + + FINE_DELAY_OFF = 0 + FINE_DELAY_ON = 1 + + GATE_GATE_EXT = 0 + DAISY_SYNC_IN_2 = 1 + + #MEASURE-------------------------------------- + + M_PULSE_IN_FREQUENCY = 0 + M_SYNC_EXT_FREQUENCY = 1 + + #Methods + + def __init__(self, port, address = -1): + """ + Open serial device. + :param dev: Serial device path. For instance '/dev/ttyUSB0' on linux, 'COM0' on Windows. + """ + #Connection + Aerodiode.__init__(self,port, address) + + def save_shape_data(self): + return self.send_query(self._add, self.COMMAND_SAVE_SHAPE) + + def read_csv(self, csv_file): + """ + Convert a csv file in list.. 
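The `Shaper` sequence helpers above pack a 4-bit `LAST_VALID_ID` followed by eight 3-bit shape IDs (ID8 down to ID1) into a 28-bit word that is then written as a 4-byte value. The round trip is easy to check in isolation; a sketch that mirrors that layout:

```python
def pack_sequence(last_valid_id, ids):
    """Pack LAST_VALID_ID (4 bits) plus eight shape IDs (3 bits each, ID8..ID1) into an int."""
    assert len(ids) == 8
    bits = "{0:04b}".format(last_valid_id) + "".join("{0:03b}".format(i) for i in ids)
    return int(bits, 2)                                  # 28-bit value, transmitted as a U32

def unpack_sequence(word):
    """Inverse of pack_sequence: return (last_valid_id, [ID8..ID1])."""
    bits = "{0:028b}".format(word)
    return int(bits[:4], 2), [int(bits[4 + 3 * k: 7 + 3 * k], 2) for k in range(8)]

word = pack_sequence(3, [1, 2, 3, 0, 0, 0, 0, 0])
assert unpack_sequence(word) == (3, [1, 2, 3, 0, 0, 0, 0, 0])
```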
+ :param csv_file: file path of your csv file + :return: a table from your csv file + """ + table_csv = [] + with open(csv_file) as file: #open csv as file variable + read = csv.reader(file) #read file variable + print('', end='\n') #??? but it works + for line in read: #read line by line + #convert a list of string to int + int_line = "".join(line) #step 1 convert the list in a string + int_line = int(int_line) #step 2 convert in int + table_csv.append(int_line) #add to the end of the table + return table_csv + + def send_csv(self, shape_id, table_csv): + """ + Send a csv_table to the Shaper. + :param shape id: bytes from 0x00 to 0x03 (4 shapes available) + :param shape table_csv: table of integer + :no return + """ + """ + if table_csv[0]>4000: + raise ValueError('data must be inferior to 4000.') + """ + + + table_csv.pop(0) + if len(table_csv)>4000: + raise ValueError('data must be inferior to 4000.') + csv_output = [table_csv[i:i +60] for i in range(0, len(table_csv), 60)] #tableau 2D contenant des tableaux 1D qui sont les découpes de table_csv par pacquet de 60. + + for i in range(0, len(csv_output)): + + start = [0x00, 0x00] #Always start at the offset 0 which is declared on two bytes + if (i>0): + length = (0x3C)*i #=(60)*i because the card receive the data 60 by 60 values + if (length < 0xFF):#verify if the new offset could be represented on one or two bytes + start[0] = length&0xFF + else: + start[0] = length&0xFF #first byte of the offset + start[1] = (length>>8)&0xFF #second byte of the offset + + csv_send = [] + for j in range(0,len(csv_output[i])): + value1 = csv_output[i][j]//(0x100) + value0 = csv_output[i][j] - (csv_output[i][j]//0x100)*0x100 + #value 1 and 0 enable to represent each data on two bytes + csv_send.append(value1) + csv_send.append(value0) + + data = [shape_id, start[1], start[0]] + csv_send #concatenate the data + + self.send_query(self._add, self.COMMAND_WRITE_SHAPE_VALUE, data) #send the csv_send + + def software_trigger(self): + return self.send_query(self._add, self.COMMAND_SOFT_TRIGG) + + def measure_pulse_in_frequency(self): + return self.measure(self.M_PULSE_IN_FREQUENCY) + + def measure_sync_ext_frequency(self): + return self.measure(self.M_SYNC_EXT_FREQUENCY) + + + def enable_resync(self): + return self.send_query(self._add, self.COMMAND_ENABLE_RESYNC) + + + diff --git a/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.0.8-py3.10.egg b/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.0.8-py3.10.egg new file mode 100644 index 0000000..a472611 Binary files /dev/null and b/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.0.8-py3.10.egg differ diff --git a/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.1.0-py3.10.egg b/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.1.0-py3.10.egg new file mode 100644 index 0000000..5b04bb4 Binary files /dev/null and b/Python/aerodiode/AeroDIODE_Python_Library/dist/aerodiode-1.1.0-py3.10.egg differ diff --git a/Python/aerodiode/AeroDIODE_Python_Library/setup.py b/Python/aerodiode/AeroDIODE_Python_Library/setup.py new file mode 100644 index 0000000..1e9ad65 --- /dev/null +++ b/Python/aerodiode/AeroDIODE_Python_Library/setup.py @@ -0,0 +1,35 @@ +from setuptools import setup, find_packages +import codecs +import os + +here = os.path.abspath(os.path.dirname(__file__)) + +with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh: + long_description = "\n" + fh.read() + +VERSION = '1.1.0' +DESCRIPTION = 'Aerodiode device control' +LONG_DESCRIPTION = 'Enables 
to control Aerodiode optoelectronic device website : https://www.aerodiode.com/' + +# Setting up +setup( + name="aerodiode", + version=VERSION, + author="AeroDIODE", + author_email="", + description=DESCRIPTION, + long_description_content_type="text/markdown", + long_description=long_description, + packages=find_packages(), + install_requires=['pyserial', 'csv-reader'], + keywords=['python', 'optoelectronic', 'laser', 'aerodiode'], + classifiers=[ + "Development Status :: 1 - Planning", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Operating System :: Unix", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + ] + +) diff --git a/Python/aerodiode/CCS-HPP Aerodiode User Manual V1.5.pdf b/Python/aerodiode/CCS-HPP Aerodiode User Manual V1.5.pdf new file mode 100644 index 0000000..ae701c1 Binary files /dev/null and b/Python/aerodiode/CCS-HPP Aerodiode User Manual V1.5.pdf differ diff --git a/Python/aerodiode/pyaerodiode_user_document_pdmv5.pdf b/Python/aerodiode/pyaerodiode_user_document_pdmv5.pdf new file mode 100644 index 0000000..45eae6c Binary files /dev/null and b/Python/aerodiode/pyaerodiode_user_document_pdmv5.pdf differ diff --git a/Python/analyze_odmr.ipynb b/Python/analyze_odmr.ipynb new file mode 100644 index 0000000..6badcc8 --- /dev/null +++ b/Python/analyze_odmr.ipynb @@ -0,0 +1,186 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "87230806", + "metadata": {}, + "outputs": [], + "source": [ + "# Parameters\n", + "H5_PATH = \"data.h5\" # 기본값, 필요 시 최신 런 폴더 data.h5로 변경\n", + "AGG_METHOD = \"sum\" # 프레임 -> 스칼라 변환 방식 (\"sum\" 또는 \"mean\")\n", + "BASELINE_Q = 0.95 # I_off 추정용 상위 분위수\n", + "STAT_ACROSS_SWEEPS = \"mean\" # 주파수별 집계 방식 (\"mean\" 또는 \"median\")\n", + "DROP_WARMUP = True # 첫 스윕 드롭 여부\n", + "MIN_CYCLES_PER_FREQ = 2\n", + "SMOOTHING_WIN = None # 시각화용 smoothing 창 크기 (예: 3, None이면 사용안함)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26b73e27", + "metadata": {}, + "outputs": [], + "source": [ + "import h5py, numpy as np, pandas as pd, matplotlib.pyplot as plt, os\n", + "\n", + "with h5py.File(H5_PATH, \"r\") as f:\n", + " print(\"Keys in HDF5:\", list(f.keys()))\n", + " roi = f[\"roi\"][:]\n", + " frame_num = f[\"frame_num\"][:]\n", + " freq_hz = f[\"freq_hz\"][:]\n", + "\n", + "print(\"roi shape:\", roi.shape)\n", + "print(\"frame_num shape:\", frame_num.shape)\n", + "print(\"freq_hz shape:\", freq_hz.shape)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9cd76f9", + "metadata": {}, + "outputs": [], + "source": [ + "freq_GHz = freq_hz / 1e9\n", + "print(\"Unique freqs:\", len(np.unique(freq_GHz)))\n", + "print(\"Frames:\", len(frame_num))\n", + "print(\"ROI pixel count per frame:\", roi.shape[1]*roi.shape[2])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e38332ee", + "metadata": {}, + "outputs": [], + "source": [ + "if DROP_WARMUP:\n", + " first_val = freq_GHz[0]\n", + " reset_indices = np.where(freq_GHz == first_val)[0]\n", + " if len(reset_indices) > 1:\n", + " cut = reset_indices[1]\n", + " roi = roi[cut:]\n", + " frame_num = frame_num[cut:]\n", + " freq_GHz = freq_GHz[cut:]\n", + " print(f\"Warm-up cycle dropped, start from index {cut}\")\n", + " else:\n", + " print(\"No warm-up cycle detected\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b292140", + "metadata": {}, + "outputs": [], + "source": [ + "if AGG_METHOD == \"sum\":\n", + " I = 
roi.reshape(len(roi), -1).sum(axis=1)\n", + "else:\n", + " I = roi.reshape(len(roi), -1).mean(axis=1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82bee677", + "metadata": {}, + "outputs": [], + "source": [ + "I_sorted = np.sort(I)\n", + "cut = int((1-BASELINE_Q)*len(I_sorted))\n", + "I_off = np.median(I_sorted[-cut:]) if cut>0 else np.max(I_sorted)\n", + "print(\"I_off:\", I_off)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6c2343c", + "metadata": {}, + "outputs": [], + "source": [ + "df = pd.DataFrame({\"freq_GHz\": freq_GHz, \"I\": I})\n", + "if STAT_ACROSS_SWEEPS == \"mean\":\n", + " grouped = df.groupby(\"freq_GHz\")[\"I\"].mean()\n", + "else:\n", + " grouped = df.groupby(\"freq_GHz\")[\"I\"].median()\n", + "\n", + "freqs = grouped.index.values\n", + "I_avg = grouped.values\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b73f8b39", + "metadata": {}, + "outputs": [], + "source": [ + "contrast_pct = (I_off - I_avg)/I_off * 100.0 # 공식 정의\n", + "odmr_contrast = 1.0 - contrast_pct/100.0 # 플롯용 dip\n", + "pl_norm = I_avg / np.max(I_avg)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b01fa496", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(6,4))\n", + "plt.plot(freqs, pl_norm, marker='o')\n", + "plt.ylabel(\"PL (norm.)\")\n", + "plt.xlabel(\"Frequency (GHz)\")\n", + "plt.ylim(pl_norm.min()-0.01, 1.0)\n", + "plt.title(\"Normalized PL vs Frequency\")\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc9ece96", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(6,4))\n", + "plt.plot(freqs, odmr_contrast, marker='o')\n", + "plt.ylabel(\"1 - contrast (%)\")\n", + "plt.xlabel(\"Frequency (GHz)\")\n", + "plt.ylim(odmr_contrast.min()-0.01, 1.0)\n", + "plt.title(\"ODMR Contrast vs Frequency\")\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f106ae0c", + "metadata": {}, + "outputs": [], + "source": [ + "df_out = pd.DataFrame({\n", + " \"freq_GHz\": freqs,\n", + " \"PL_norm\": pl_norm,\n", + " \"contrast_pct\": contrast_pct,\n", + "})\n", + "df_out.to_csv(\"odmr_contrast.csv\", index=False)\n", + "print(\"Saved odmr_contrast.csv\")\n" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Python/color_example.py b/Python/color_example.py new file mode 100644 index 0000000..6f72efc --- /dev/null +++ b/Python/color_example.py @@ -0,0 +1,100 @@ +""" +Color Example + +This example shows how to work with the mono to color processor module. It will open a camera and poll for an image, +but the focus will be on explaining how to color the image from the camera. + +""" + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK +from thorlabs_tsi_sdk.tl_mono_to_color_processor import MonoToColorProcessorSDK +from thorlabs_tsi_sdk.tl_mono_to_color_enums import COLOR_SPACE +from thorlabs_tsi_sdk.tl_color_enums import FORMAT + +""" + The MonoToColorProcessorSDK and MonoToColorProcessor objects can be used with context managers for automatic + clean up. This multi-context-manager 'with' statement opens both the camera sdk and the mono to color sdk at + once. 
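The contrast arithmetic spread across the `analyze_odmr` notebook cells above condenses into one helper: aggregate each ROI frame to a scalar, estimate the microwave-off level from the brightest tail of the distribution, average repeats per frequency, then convert to a percentage dip. A sketch using the same definitions (array shapes as in the notebook: `roi` is frames × H × W, `freq_hz` holds one value per frame):

```python
import numpy as np
import pandas as pd

def odmr_contrast(roi, freq_hz, agg="sum", baseline_q=0.95, stat="mean"):
    """Return (freq_GHz, PL_norm, contrast_pct) following the notebook's definitions."""
    I = roi.reshape(len(roi), -1)
    I = I.sum(axis=1) if agg == "sum" else I.mean(axis=1)              # frame -> scalar intensity
    I_sorted = np.sort(I)
    cut = int((1 - baseline_q) * len(I_sorted))
    I_off = np.median(I_sorted[-cut:]) if cut > 0 else I_sorted.max()  # bright-tail baseline (MW off)
    grouped = pd.DataFrame({"freq_GHz": freq_hz / 1e9, "I": I}).groupby("freq_GHz")["I"].agg(stat)
    freqs, I_avg = grouped.index.values, grouped.values                # repeats averaged per frequency
    contrast_pct = (I_off - I_avg) / I_off * 100.0                     # dip depth in percent
    return freqs, I_avg / I_avg.max(), contrast_pct
```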
+""" +with TLCameraSDK() as camera_sdk, MonoToColorProcessorSDK() as mono_to_color_sdk: + available_cameras = camera_sdk.discover_available_cameras() + if len(available_cameras) < 1: + raise ValueError("no cameras detected") + + with camera_sdk.open_camera(available_cameras[0]) as camera: + camera.frames_per_trigger_zero_for_unlimited = 0 # start camera in continuous mode + camera.image_poll_timeout_ms = 2000 # 2 second timeout + camera.arm(2) + + """ + In a real-world scenario, we want to save the image width and height before color processing so that we + do not have to query it from the camera each time it is needed, which would slow down the process. It is + safe to save these after arming since the image width and height cannot change while the camera is armed. + """ + image_width = camera.image_width_pixels + image_height = camera.image_height_pixels + + camera.issue_software_trigger() + + frame = camera.get_pending_frame_or_null() + if frame is not None: + print("frame received!") + else: + raise ValueError("No frame arrived within the timeout!") + + camera.disarm() + + """ + When creating a mono to color processor, we want to initialize it using parameters from the camera. + """ + with mono_to_color_sdk.create_mono_to_color_processor( + camera.camera_sensor_type, + camera.color_filter_array_phase, + camera.get_color_correction_matrix(), + camera.get_default_white_balance_matrix(), + camera.bit_depth + ) as mono_to_color_processor: + """ + Once it is created, we can change the color space and output format properties. sRGB is the default + color space, and will usually give the best looking image. The output format will determine how the + transform image data will be structured. + """ + mono_to_color_processor.color_space = COLOR_SPACE.SRGB # sRGB color space + mono_to_color_processor.output_format = FORMAT.RGB_PIXEL # data is returned as sequential RGB values + """ + We can also adjust the Red, Green, and Blue gains. These values amplify the intensity of their + corresponding colors in the transformed image. For example, if Blue and Green gains are set to 0 + and the Red gain is 10, the resulting image will look entirely Red. The most common use case for these + properties will be for white balancing. By default they are set to model-specific values that gives + reasonably good white balance in typical lighting. + """ + print("Red Gain = {red_gain}\nGreen Gain = {green_gain}\nBlue Gain = {blue_gain}\n".format( + red_gain=mono_to_color_processor.red_gain, + green_gain=mono_to_color_processor.green_gain, + blue_gain=mono_to_color_processor.blue_gain + )) + """ + When we have all the settings we want for the mono to color processor, we call one of the transform_to + functions to get a color image. 
+ """ + # this will give us a resulting image with 3 channels (RGB) and 16 bits per channel, resulting in 48 bpp + color_image_48_bpp = mono_to_color_processor.transform_to_48(frame.image_buffer, image_width, image_height) + + # this will give us a resulting image with 4 channels (RGBA) and 8 bits per channel, resulting in 32 bpp + color_image_32_bpp = mono_to_color_processor.transform_to_32(frame.image_buffer, image_width, image_height) + + # this will give us a resulting image with 3 channels (RGB) and 8 bits per channel, resulting in 24 bpp + color_image_24_bpp = mono_to_color_processor.transform_to_24(frame.image_buffer, image_width, image_height) + + # from here, perform any actions you need to using the color image + +# Because we are using the 'with' statement context-manager, disposal has been taken care of. + +print("program completed") diff --git a/Python/log.txt b/Python/log.txt new file mode 100644 index 0000000..cde1ef3 --- /dev/null +++ b/Python/log.txt @@ -0,0 +1,12 @@ +Exception ignored in atexit callback >: + 'matplotlib._pylab_helpers.Gcf'>>: + 'matplotlib._pylab_helpers.Gcf'>>: + 'matplotlib._pylab_helpers.Gcf'>>: +Traceback (most recent call last): + File "C:\Users\user\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\matplotlib\_pylab_helpers.py", line 81, in destroy_all + manager.destroy() + File "C:\Users\user\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\matplotlib\backends\_backend_tk.py", line 590, in destroy + self._window_dpi.trace_remove('write', self._window_dpi_cbname) + File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.13_3.13.2032.0_x64__qbz5n2kfra8p0\Lib\tkinter\__init__.py", line 474, in trace_remove + self._tk.call('trace', 'remove', 'variable', +RuntimeError: main thread is not in main loop diff --git a/Python/polarization_example.py b/Python/polarization_example.py new file mode 100644 index 0000000..d7e6c90 --- /dev/null +++ b/Python/polarization_example.py @@ -0,0 +1,158 @@ +""" +Polarization Example + +This example shows how to work with the polarization processor module. It will open a camera and poll for an image, +but the focus will be on explaining how to process the image from the camera. At the end, 4 consecutive images should +pop up showing intensity data, azimuth data, degree of linear polarization (DoLP) data, and a Quad View. + +""" + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + +import numpy as np +from PIL import Image + +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK +from thorlabs_tsi_sdk.tl_camera_enums import SENSOR_TYPE +from thorlabs_tsi_sdk.tl_polarization_processor import PolarizationProcessorSDK + +""" + The PolarizationProcessorSDK and PolarizationProcessor objects can be used with context managers for automatic + clean up. This multi-context-manager 'with' statement opens both the camera sdk and the polarization sdk at + once. 
+""" +with TLCameraSDK() as camera_sdk, PolarizationProcessorSDK() as polarization_sdk: + available_cameras = camera_sdk.discover_available_cameras() + if len(available_cameras) < 1: + raise ValueError("no cameras detected") + + with camera_sdk.open_camera(available_cameras[0]) as camera: + camera.frames_per_trigger_zero_for_unlimited = 0 # start camera in continuous mode + camera.image_poll_timeout_ms = 2000 # 2 second timeout + camera.arm(2) + + """ + In a real-world scenario, we want to save the image width and height before color processing so that we + do not have to query it from the camera each time it is needed, which would slow down the process. It is + safe to save these after arming since the image width and height cannot change while the camera is armed. + """ + image_width = camera.image_width_pixels + image_height = camera.image_height_pixels + + camera.issue_software_trigger() + + frame = camera.get_pending_frame_or_null() + if frame is not None: + print("frame received!") + else: + raise ValueError("No frame arrived within the timeout!") + + camera.disarm() + + if camera.camera_sensor_type is not SENSOR_TYPE.MONOCHROME_POLARIZED: + raise ValueError("Polarization processing should only be done with polarized cameras") + + camera_polar_phase = camera.polar_phase + camera_bit_depth = camera.bit_depth + """ + We're scaling to 8-bits in this example so we can easily convert to PIL Image objects. + """ + max_output_value = 255 + + with polarization_sdk.create_polarization_processor() as polarization_processor: + """ + Convert the raw sensor data to polarization image data. We will convert to each of the available outputs: + intensity, azimuth, and degree of linear polarization (DoLP). + """ + output_intensity = polarization_processor.transform_to_intensity(camera_polar_phase, + frame.image_buffer, + 0, # origin x + 0, # origin y + image_width, + image_height, + camera_bit_depth, + max_output_value) + output_azimuth = polarization_processor.transform_to_azimuth(camera_polar_phase, + frame.image_buffer, + 0, # origin x + 0, # origin y + image_width, + image_height, + camera_bit_depth, + max_output_value) + output_dolp = polarization_processor.transform_to_dolp(camera_polar_phase, + frame.image_buffer, + 0, # origin x + 0, # origin y + image_width, + image_height, + camera_bit_depth, + max_output_value) + """ + Convert from 16-bit to 8-bit + """ + output_intensity = output_intensity.astype(np.ubyte) + output_azimuth = output_azimuth.astype(np.ubyte) + output_dolp = output_dolp.astype(np.ubyte) + """ + Reshape the 1D output arrays to be 2D arrays (image_width x image_height) + """ + output_intensity = output_intensity.reshape(image_height, image_width) + output_azimuth = output_azimuth.reshape(image_height, image_width) + output_dolp = output_dolp.reshape(image_height, image_width) + """ + Convert the polarization image data to PIL Image types + """ + intensity_image = Image.fromarray(output_intensity) + azimuth_image = Image.fromarray(output_azimuth) + dolp_image = Image.fromarray(output_dolp) + """ + We're going to display each of the images created using PIL's show() method. This tries to open the images + using your default image viewer, and may fail on some configurations. + """ + intensity_image.show() + azimuth_image.show() + dolp_image.show() + """ + Lastly, we'll construct a QuadView image that is useful for visualizing each polar rotation: 0, 45, 90, and + -45 degrees. 
The sensor on the polarized camera has a filter in front of it that is composed of 2x2 pixel + sections that look like the following pattern: + + ------------- + | +45 | +90 | + ------------- + | + 0 | -45 | + ------------- + + It is always 2x2, but the ordering of the rotations may differ depending on your camera model. The top left + rotation (the 'origin' rotation) is always equal to the camera_polar_phase that was queried earlier. We'll + use array splicing to extract each of the rotations and separate them visually. If you are familiar with + manipulating color image arrays, this is similar to pulling out the R, G, and B components of an RGB image. + """ + unprocessed_image = frame.image_buffer.reshape(image_height, image_width) # this is the raw image data + unprocessed_image = unprocessed_image >> camera_bit_depth - 8 # scale to 8 bits for easier displaying + output_quadview = np.zeros(shape=(image_height, image_width)) # initialize array for QuadView data + # Top Left Quadrant = + output_quadview[0:int(image_height / 2), 0:int(image_width / 2)] = \ + unprocessed_image[0::2, 0::2] # (0,0): top left rotation == camera_polar_phase + # Top Right Quadrant = + output_quadview[0:int(image_height / 2), int(image_width / 2):image_width] = \ + unprocessed_image[0::2, 1::2] # (0,1): top right rotation + # Bottom Left Quadrant = + output_quadview[int(image_height / 2):image_height, 0:int(image_width / 2)] = \ + unprocessed_image[1::2, 0::2] # (1,0): bottom left rotation + # Bottom Right Quadrant = + output_quadview[int(image_height / 2):image_height, int(image_width / 2):image_width] = \ + unprocessed_image[1::2, 1::2] # (1,1): bottom right rotation + # Display QuadView + quadview_image = Image.fromarray(output_quadview) + quadview_image.show() + +# Because we are using the 'with' statement context-manager, disposal has been taken care of. + +print("program completed") diff --git a/Python/polling_example.py b/Python/polling_example.py new file mode 100644 index 0000000..efc1b46 --- /dev/null +++ b/Python/polling_example.py @@ -0,0 +1,68 @@ +""" +Polling Example + +This example shows how to open a camera, adjust some settings, and poll for images. It also shows how 'with' statements +can be used to automatically clean up camera and SDK resources. 
+ +""" + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + +import numpy as np +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK, OPERATION_MODE + +NUM_FRAMES = 10 # adjust to the desired number of frames + + +with TLCameraSDK() as sdk: + available_cameras = sdk.discover_available_cameras() + if len(available_cameras) < 1: + print("no cameras detected") + + with sdk.open_camera(available_cameras[0]) as camera: + camera.exposure_time_us = 11000 # set exposure to 11 ms + camera.frames_per_trigger_zero_for_unlimited = 0 # start camera in continuous mode + camera.image_poll_timeout_ms = 1000 # 1 second polling timeout + old_roi = camera.roi # store the current roi + """ + uncomment the line below to set a region of interest (ROI) on the camera + """ + # camera.roi = (100, 100, 600, 600) # set roi to be at origin point (100, 100) with a width & height of 500 + + """ + uncomment the lines below to set the gain of the camera and read it back in decibels + """ + #if camera.gain_range.max > 0: + # db_gain = 6.0 + # gain_index = camera.convert_decibels_to_gain(db_gain) + # camera.gain = gain_index + # print(f"Set camera gain to {camera.convert_gain_to_decibels(camera.gain)}") + + camera.arm(2) + + camera.issue_software_trigger() + + for i in range(NUM_FRAMES): + frame = camera.get_pending_frame_or_null() + if frame is not None: + print("frame #{} received!".format(frame.frame_count)) + + frame.image_buffer # .../ perform operations using the data from image_buffer + + # NOTE: frame.image_buffer is a temporary memory buffer that may be overwritten during the next call + # to get_pending_frame_or_null. The following line makes a deep copy of the image data: + image_buffer_copy = np.copy(frame.image_buffer) + else: + print("timeout reached during polling, program exiting...") + break + camera.disarm() + camera.roi = old_roi # reset the roi back to the original roi + +# Because we are using the 'with' statement context-manager, disposal has been taken care of. + +print("program completed") diff --git a/Python/tifffile_tiff_writing_example.py b/Python/tifffile_tiff_writing_example.py new file mode 100644 index 0000000..e4618f4 --- /dev/null +++ b/Python/tifffile_tiff_writing_example.py @@ -0,0 +1,139 @@ +""" +Tiff Writing Example - tifffile + +This example shows how to use Thorlabs TSI Cameras to write images to a disk using the tifffile library, +see https://pypi.org/project/tifffile/ for more information. + +There are many TIFF-writing libraries for python, this example is meant to show how to integrate with tifffile. +The process should generally be the same with most TIFF-writing libraries, but results may vary. + +In this example 10 images are going to be taken and saved to a single multipage TIFF file. The program will detect +if the camera has a color filter and will perform color processing if so. + +One thing to note is that this program will save TIFFs in the camera's bit depth. Some image viewers may not recognize +this and will show the images as being much darker than expected. If you are experiencing dark images, we recommend +trying out various image viewers designed for scientific imaging such as ThorCam or ImageJ. 
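+
+As a hedged aside (not part of the original example), if dark images are a problem one could instead scale each
+frame down to 8 bits per pixel before saving, using the same bit-shift pattern as the TkInter live view example
+(this sketch assumes numpy is imported as np):
+
+    scaled_8_bit = (frame.image_buffer >> (bit_depth - 8)).astype(np.uint8)  # drop the extra bits
+    tiff.save(data=scaled_8_bit)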
+ +""" + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + +import os +import tifffile + +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK +from thorlabs_tsi_sdk.tl_mono_to_color_processor import MonoToColorProcessorSDK +from thorlabs_tsi_sdk.tl_camera_enums import SENSOR_TYPE + +NUMBER_OF_IMAGES = 10 # Number of TIFF images to be saved +OUTPUT_DIRECTORY = os.path.abspath(r'.') # Directory the TIFFs will be saved to +FILENAME = 'image.tif' # The filename of the TIFF + +TAG_BITDEPTH = 32768 +TAG_EXPOSURE = 32769 + +# delete image if it exists +if os.path.exists(OUTPUT_DIRECTORY + os.sep + FILENAME): + os.remove(OUTPUT_DIRECTORY + os.sep + FILENAME) + +with TLCameraSDK() as sdk: + cameras = sdk.discover_available_cameras() + if len(cameras) == 0: + print("Error: no cameras detected!") + + with sdk.open_camera(cameras[0]) as camera: + # setup the camera for continuous acquisition + camera.frames_per_trigger_zero_for_unlimited = 0 + camera.image_poll_timeout_ms = 2000 # 2 second timeout + camera.arm(2) + + # save these values to place in our custom TIFF tags later + bit_depth = camera.bit_depth + exposure = camera.exposure_time_us + + # need to save the image width and height for color processing + image_width = camera.image_width_pixels + image_height = camera.image_height_pixels + + # initialize a mono to color processor if this is a color camera + is_color_camera = (camera.camera_sensor_type == SENSOR_TYPE.BAYER) + mono_to_color_sdk = None + mono_to_color_processor = None + if is_color_camera: + mono_to_color_sdk = MonoToColorProcessorSDK() + mono_to_color_processor = mono_to_color_sdk.create_mono_to_color_processor( + camera.camera_sensor_type, + camera.color_filter_array_phase, + camera.get_color_correction_matrix(), + camera.get_default_white_balance_matrix(), + camera.bit_depth + ) + + # begin acquisition + camera.issue_software_trigger() + frames_counted = 0 + while frames_counted < NUMBER_OF_IMAGES: + frame = camera.get_pending_frame_or_null() + if frame is None: + raise TimeoutError("Timeout was reached while polling for a frame, program will now exit") + + frames_counted += 1 + + image_data = frame.image_buffer + if is_color_camera: + # transform the raw image data into RGB color data + image_data = mono_to_color_processor.transform_to_48(image_data, image_width, image_height) + image_data = image_data.reshape(image_height, image_width, 3) + + with tifffile.TiffWriter(OUTPUT_DIRECTORY + os.sep + FILENAME, append=True) as tiff: + """ + Setting append=True here means that calling tiff.save will add the image as a page to a multipage TIFF. + """ + tiff.save(data=image_data, # np.ushort image data array from the camera + compress=0, # amount of compression (0-9), by default it is uncompressed (0) + extratags=[(TAG_BITDEPTH, 'I', 1, bit_depth, False), # custom TIFF tag for bit depth + (TAG_EXPOSURE, 'I', 1, exposure, False)] # custom TIFF tag for exposure + ) + """ + If compress > 0 tifffile will compress the image using zlib - deflate compression. + Instead of an int a str can be supplied to specify a different compression algorithm; + e.g. compress = 'lzma' + View the tifffile source or online to see what is supported. + """ + """ + The extratags parameter allows the user to specify additional tags. Programs will typically ignore + any tags from 32768 onward, which is where the bit depth and exposure have been placed. 
The + syntax for extra tags is (tag_code, data_type_of_value, number_of_values, value, write_once). + View the tifffile source for more information. + """ + camera.disarm() + + # we did not use context manager for color processor, so manually dispose of it + if is_color_camera: + try: + mono_to_color_processor.dispose() + except Exception as exception: + print("Unable to dispose mono to color processor: " + str(exception)) + try: + mono_to_color_sdk.dispose() + except Exception as exception: + print("Unable to dispose mono to color sdk: " + str(exception)) + +""" +Reading tiffs - to test that the tags from before worked, we're going to read back the tags on the first page. +Note that custom TIFF tags are not going to be picked up by normal TIFF viewers, but can be read programmatically +if the tag code is known. +""" +# open file + +with tifffile.TiffFile(OUTPUT_DIRECTORY + os.sep + FILENAME) as tiff_read: + if len(tiff_read.pages) < 1: + raise ValueError("No pages were found in multipage TIFF") + page_one = tiff_read.pages[0] + print("First Image: Bit Depth = {} bpp, Exposure Time = {} ms".format(page_one.tags[str(TAG_BITDEPTH)].value, + page_one.tags[str(TAG_EXPOSURE)].value/1000)) diff --git a/Python/tkinter_camera_live_view.py b/Python/tkinter_camera_live_view.py new file mode 100644 index 0000000..2fa589f --- /dev/null +++ b/Python/tkinter_camera_live_view.py @@ -0,0 +1,202 @@ +""" +Camera Live View - TkInter + +This example shows how one could create a live image viewer using TkInter. +It also uses the third party library 'pillow', which is a fork of PIL. + +This example detects if a camera is a color camera and will process the +images using the tl_mono_to_color_processor module. + +This example uses threading to enqueue images coming off the camera in one thread, and +dequeue them in the UI thread for quick displaying. + +""" + +try: + # if on Windows, use the provided setup script to add the DLLs folder to the PATH + from windows_setup import configure_path + configure_path() +except ImportError: + configure_path = None + +from thorlabs_tsi_sdk.tl_camera import TLCameraSDK, TLCamera, Frame +from thorlabs_tsi_sdk.tl_camera_enums import SENSOR_TYPE +from thorlabs_tsi_sdk.tl_mono_to_color_processor import MonoToColorProcessorSDK + +try: + # For python 2.7 tkinter is named Tkinter + import Tkinter as tk +except ImportError: + import tkinter as tk +from PIL import Image, ImageTk +import typing +import threading +try: + # For Python 2.7 queue is named Queue + import Queue as queue +except ImportError: + import queue + +""" LiveViewCanvas + +This is a Tkinter Canvas object that can be reused in custom programs. The Canvas expects a parent Tkinter object and +an image queue. The image queue is a queue.Queue that it will pull images from, and is expected to hold PIL Image +objects that will be displayed to the canvas. It automatically adjusts its size based on the incoming image dimensions. 
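+
+A minimal usage sketch (not part of the original example, mirroring the __main__ section at the bottom of this file):
+
+    root = tk.Tk()
+    image_queue = queue.Queue(maxsize=2)  # expected to hold PIL Image objects
+    camera_widget = LiveViewCanvas(parent=root, image_queue=image_queue)
+    root.mainloop()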
+ +""" + + +class LiveViewCanvas(tk.Canvas): + + def __init__(self, parent, image_queue): + # type: (typing.Any, queue.Queue) -> LiveViewCanvas + self.image_queue = image_queue + self._image_width = 0 + self._image_height = 0 + tk.Canvas.__init__(self, parent) + self.pack() + self._get_image() + + def _get_image(self): + try: + image = self.image_queue.get_nowait() + self._image = ImageTk.PhotoImage(master=self, image=image) + if (self._image.width() != self._image_width) or (self._image.height() != self._image_height): + # resize the canvas to match the new image size + self._image_width = self._image.width() + self._image_height = self._image.height() + self.config(width=self._image_width, height=self._image_height) + self.create_image(0, 0, image=self._image, anchor='nw') + except queue.Empty: + pass + self.after(10, self._get_image) + + +""" ImageAcquisitionThread + +This class derives from threading.Thread and is given a TLCamera instance during initialization. When started, the +thread continuously acquires frames from the camera and converts them to PIL Image objects. These are placed in a +queue.Queue object that can be retrieved using get_output_queue(). The thread doesn't do any arming or triggering, +so users will still need to setup and control the camera from a different thread. Be sure to call stop() when it is +time for the thread to stop. + +""" + + +class ImageAcquisitionThread(threading.Thread): + + def __init__(self, camera): + # type: (TLCamera) -> ImageAcquisitionThread + super(ImageAcquisitionThread, self).__init__() + self._camera = camera + self._previous_timestamp = 0 + + # setup color processing if necessary + if self._camera.camera_sensor_type != SENSOR_TYPE.BAYER: + # Sensor type is not compatible with the color processing library + self._is_color = False + else: + self._mono_to_color_sdk = MonoToColorProcessorSDK() + self._image_width = self._camera.image_width_pixels + self._image_height = self._camera.image_height_pixels + self._mono_to_color_processor = self._mono_to_color_sdk.create_mono_to_color_processor( + SENSOR_TYPE.BAYER, + self._camera.color_filter_array_phase, + self._camera.get_color_correction_matrix(), + self._camera.get_default_white_balance_matrix(), + self._camera.bit_depth + ) + self._is_color = True + + self._bit_depth = camera.bit_depth + self._camera.image_poll_timeout_ms = 0 # Do not want to block for long periods of time + self._image_queue = queue.Queue(maxsize=2) + self._stop_event = threading.Event() + + def get_output_queue(self): + # type: (type(None)) -> queue.Queue + return self._image_queue + + def stop(self): + self._stop_event.set() + + def _get_color_image(self, frame): + # type: (Frame) -> Image + # verify the image size + width = frame.image_buffer.shape[1] + height = frame.image_buffer.shape[0] + if (width != self._image_width) or (height != self._image_height): + self._image_width = width + self._image_height = height + print("Image dimension change detected, image acquisition thread was updated") + # color the image. 
transform_to_24 will scale to 8 bits per channel + color_image_data = self._mono_to_color_processor.transform_to_24(frame.image_buffer, + self._image_width, + self._image_height) + color_image_data = color_image_data.reshape(self._image_height, self._image_width, 3) + # return PIL Image object + return Image.fromarray(color_image_data, mode='RGB') + + def _get_image(self, frame): + # type: (Frame) -> Image + # no coloring, just scale down image to 8 bpp and place into PIL Image object + scaled_image = frame.image_buffer >> (self._bit_depth - 8) + return Image.fromarray(scaled_image) + + def run(self): + while not self._stop_event.is_set(): + try: + frame = self._camera.get_pending_frame_or_null() + if frame is not None: + if self._is_color: + pil_image = self._get_color_image(frame) + else: + pil_image = self._get_image(frame) + self._image_queue.put_nowait(pil_image) + except queue.Full: + # No point in keeping this image around when the queue is full, let's skip to the next one + pass + except Exception as error: + print("Encountered error: {error}, image acquisition will stop.".format(error=error)) + break + print("Image acquisition has stopped") + if self._is_color: + self._mono_to_color_processor.dispose() + self._mono_to_color_sdk.dispose() + + +""" Main + +When run as a script, a simple Tkinter app is created with just a LiveViewCanvas widget. + +""" +if __name__ == "__main__": + with TLCameraSDK() as sdk: + camera_list = sdk.discover_available_cameras() + with sdk.open_camera(camera_list[0]) as camera: + + # create generic Tk App with just a LiveViewCanvas widget + print("Generating app...") + root = tk.Tk() + root.title(camera.name) + image_acquisition_thread = ImageAcquisitionThread(camera) + camera_widget = LiveViewCanvas(parent=root, image_queue=image_acquisition_thread.get_output_queue()) + + print("Setting camera parameters...") + camera.frames_per_trigger_zero_for_unlimited = 0 + camera.arm(2) + camera.issue_software_trigger() + + print("Starting image acquisition thread...") + image_acquisition_thread.start() + + print("App starting") + root.mainloop() + + print("Waiting for image acquisition thread to finish...") + image_acquisition_thread.stop() + image_acquisition_thread.join() + + print("Closing resources...") + + print("App terminated. Goodbye!")