diff --git a/scripts/pylib/display-twister-harness/camera_shield/README.rst b/scripts/pylib/display-twister-harness/camera_shield/README.rst
new file mode 100644
index 0000000000000..2458be9912cc7
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/README.rst
@@ -0,0 +1,124 @@
+===============================
+Display capture Twister harness
+===============================
+
+
+Configuration example
+---------------------
+
+.. code-block:: yaml
+
+   case_config:
+     device_id: 0             # camera index; try other indices if capture fails
+     res_x: 1280              # horizontal resolution
+     res_y: 720               # vertical resolution
+     fps: 30                  # analysis frames per second
+     run_time: 20             # run for 20 seconds
+   tests:
+     timeout: 30              # seconds to wait for the prompt string
+     prompt: "screen starts"  # prompt marking the start of the test
+     expect: ["tests.drivers.display.check.shield"]
+   plugins:
+     - name: signature
+       module: plugins.signature_plugin
+       class: VideoSignaturePlugin
+       status: "enable"
+       config:
+         operations: "compare"  # operation ('generate', 'compare')
+         metadata:
+           name: "tests.drivers.display.check.shield"  # metadata stored with the fingerprint
+           platform: "frdm_mcxn947"
+         directory: "./fingerprints"  # directory of fingerprints to compare against; unused in generate mode
+         duration: 100        # number of frames to check
+         method: "combined"   # signature method ('phash', 'dhash', 'histogram', 'combined')
+         threshold: 0.65
+         phash_weight: 0.35
+         dhash_weight: 0.25
+         histogram_weight: 0.2
+         edge_ratio_weight: 0.1
+         gradient_hist_weight: 0.1
+
+Example Zephyr display tests
+----------------------------
+
+1. Set up a camera to capture the display content
+
+   - a UVC-compatible camera with at least 2 megapixels (for example, 1080p)
+   - a light-blocking black curtain
+   - a PC host that the camera connects to
+   - the DUT connected to the same PC host for flashing and serial console
+
+2. Generate video fingerprints
+
+   - Build and flash a known-to-work display app to the DUT, e.g.:
+
+     .. code-block:: console
+
+        west build -b frdm_mcxn947/mcxn947/cpu0 tests/drivers/display/display_check
+        west flash
+
+   - Clone the code:
+
+     .. code-block:: console
+
+        git clone https://github.com/hakehuang/camera_shield
+
+   - Follow the instructions in the repo's README.
+   - Set the signature capture mode in ``config.yaml`` as below:
+
+     .. code-block:: yaml
+
+        - name: signature
+          module: .plugins.signature_plugin
+          class: VideoSignaturePlugin
+          status: "enable"
+          config:
+            operations: "generate"  # operation ('generate', 'compare')
+            metadata:
+              name: "tests.drivers.display.check.shield"  # metadata stored with the fingerprint
+              platform: "frdm_mcxn947"
+            directory: "./fingerprints"  # directory of fingerprints to compare against; unused in generate mode
+
+   - Run the fingerprint generation program from outside the camera_shield folder.
+
+     Note: on Ubuntu 24.04 you may need to ``export QT_QPA_PLATFORM=xcb`` to
+     resolve the error below:
+
+     .. code-block:: console
+
+        qt.qpa.plugin: Could not find the Qt platform plugin "wayland" in "~/camera_shield/.ven/lib/python3.12/site-packages/cv2/qt/plugins"
+
+     .. code-block:: console
+
+        python -m camera_shield.main --config camera_shield/config.yaml
+
+     Video fingerprints of the captured frames are recorded in the
+     ``./fingerprints`` directory by default.
+
+   - Set the ``DISPLAY_TEST_DIR`` environment variable:
+
+     .. code-block:: console
+
+        DISPLAY_TEST_DIR=~/camera_shield/
+
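+   - Optionally, sanity-check a generated fingerprint by loading the pickle
+     and inspecting its fields (a minimal sketch; the file name is an example,
+     as the plugin timestamps the files it saves):
+
+     .. code-block:: python
+
+        import pickle
+
+        with open("fingerprints/fingerprint_20250101_120000.pkl", "rb") as f:
+            fp = pickle.load(f)
+
+        print(fp["method"])                 # e.g. "combined"
+        print(fp["metadata"]["name"])       # testcase name used for matching
+        print(len(fp["frame_signatures"]))  # number of recorded frame signatures
+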
+3. Run the test
+
+   .. code-block:: console
+
+      # Export the fingerprints path
+      export DISPLAY_TEST_DIR=
+
+      # Twister hardware map file settings:
+      # ensure your map file provides the required fixture;
+      # for the example below you need "fixture_display"
+
+      # Ensure you have installed the Python packages required for tests,
+      # listed in scripts/requirements-run-test.txt
+
+      # Run the detection program
+      scripts/twister --device-testing --hardware-map map.yml -T tests/drivers/display/display_check/
+
+Notes
+-----
+
+1. Generated fingerprints are stored under the directory configured in
+   ``config.yaml`` and tagged with the ``name`` field of its ``metadata``
+   section.
+2. The DUT testcase name must match the value of the metadata ``name`` field
+   in the captured fingerprint's config.
+3. You can put multiple fingerprints in one folder; this increases comparison
+   time, but helps to catch other defects.
diff --git a/scripts/pylib/display-twister-harness/camera_shield/__init__.py b/scripts/pylib/display-twister-harness/camera_shield/__init__.py
new file mode 100644
index 0000000000000..7172ccebf93ba
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/scripts/pylib/display-twister-harness/camera_shield/config.yaml b/scripts/pylib/display-twister-harness/camera_shield/config.yaml
new file mode 100644
index 0000000000000..d02e61f820476
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/config.yaml
@@ -0,0 +1,22 @@
+case_config: {device_id: 0, fps: 30, res_y: 720, res_x: 1280, run_time: 20}
+plugins:
+- class: VideoSignaturePlugin
+  config:
+    dhash_weight: 0.25
+    directory: ${DISPLAY_TEST_DIR}/./fingerprints
+    duration: 100
+    edge_ratio_weight: 0.1
+    gradient_hist_weight: 0.1
+    histogram_weight: 0.2
+    metadata: {name: tests.drivers.display.check.shield, platform: frdm_mcxn947}
+    method: combined
+    operations: compare
+    phash_weight: 0.35
+    threshold: 0.65
+  module: .plugins.signature_plugin
+  name: signature
+  status: enable
+tests:
+  expect: [tests.drivers.display.check.shield]
+  prompt: screen starts
+  timeout: 30
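
The ${DISPLAY_TEST_DIR} reference above is expanded by resolve_env_vars() in
main.py (below) using string.Template. A minimal sketch of that behavior (the
path value is an example):

```python
import os
from string import Template

os.environ["DISPLAY_TEST_DIR"] = "/home/user/camera_shield"  # example value

raw = "${DISPLAY_TEST_DIR}/./fingerprints"
print(Template(raw).safe_substitute(os.environ))
# -> /home/user/camera_shield/./fingerprints

# safe_substitute leaves unknown placeholders intact instead of raising
print(Template("${NOT_SET}/x").safe_substitute(os.environ))  # -> ${NOT_SET}/x
```
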
diff --git a/scripts/pylib/display-twister-harness/camera_shield/main.py b/scripts/pylib/display-twister-harness/camera_shield/main.py
new file mode 100644
index 0000000000000..0e1b44f929e94
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/main.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import importlib
+import io
+import os
+import sys
+import time
+from string import Template
+
+import cv2
+import yaml
+
+from camera_shield.uvc_core.camera_controller import UVCCamera
+from camera_shield.uvc_core.plugin_base import PluginManager
+
+sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
+
+
+class Application:
+    def __init__(self, config_path="config.yaml"):
+        def resolve_env_vars(yaml_dict):
+            """Process yaml with Template strings for safer environment variable resolution."""
+            if isinstance(yaml_dict, dict):
+                return {k: resolve_env_vars(v) for k, v in yaml_dict.items()}
+            elif isinstance(yaml_dict, list):
+                return [resolve_env_vars(i) for i in yaml_dict]
+            elif isinstance(yaml_dict, str):
+                # Create a template and substitute environment variables
+                template = Template(yaml_dict)
+                return template.safe_substitute(os.environ)
+            else:
+                return yaml_dict
+
+        self.active_plugins = {}  # Initialize empty plugin dictionary
+        with open(config_path, encoding="utf-8-sig") as f:
+            config = yaml.safe_load(f)
+        self.config = resolve_env_vars(config)
+
+        os.environ["DISPLAY"] = ":0"
+
+        # Default capture settings, overridden by case_config entries if present
+        self.case_config = {
+            "device_id": 0,
+            "res_x": 1280,
+            "res_y": 720,
+            "fps": 30,
+            "run_time": 20,
+        }
+
+        if "case_config" in self.config:
+            for key, default in self.case_config.items():
+                self.case_config[key] = self.config["case_config"].get(key, default)
+
+        self.camera = UVCCamera(self.case_config)
+        self.plugin_manager = PluginManager()
+        self.load_plugins()
+        self.results = []
+
+    def load_plugins(self):
+        for plugin_cfg in self.config["plugins"]:
+            if plugin_cfg.get("status", "disable") == "disable":
+                continue
+            module = importlib.import_module(plugin_cfg["module"], package=__package__)
+            plugin_class = getattr(module, plugin_cfg["class"])
+            self.active_plugins[plugin_cfg["name"]] = plugin_class(
+                plugin_cfg["name"], plugin_cfg.get("config", {})
+            )
+            self.plugin_manager.register_plugin(plugin_cfg["name"], plugin_class)
+
+    def handle_results(self, results, frame):
+        for name, plugin in self.active_plugins.items():
+            if name in results:
+                plugin.handle_results(results[name], frame)
+
+    def shutdown(self):
+        self.camera.release()
+        for plugin in self.active_plugins.values():
+            self.results += plugin.shutdown()
+
+    def run(self):
+        try:
+            start_time = time.time()
+            self.camera.initialize()
+            for plugin in self.active_plugins.values():
+                plugin.initialize()
+            while True:
+                ret, frame = self.camera.get_frame()
+                if not ret:
+                    continue
+
+                # Maintain OpenCV event loop
+                if cv2.waitKey(1) == 27:  # ESC key
+                    break
+
+                results = {}
+                for name, plugin in self.active_plugins.items():
+                    results[name] = plugin.process_frame(frame)
+
+                self.handle_results(results, frame)
+                self.camera.show_frame(frame)
+                frame_delay = 1 / self.case_config["fps"]
+                if time.time() - start_time > self.case_config["run_time"]:
+                    break
+                time.sleep(frame_delay)
+
+        except KeyboardInterrupt:
+            print("interrupted by user\n")
+        finally:
+            self.shutdown()
+
+        return self.results
+
+
+if __name__ == "__main__":
+    # Honor the --config option documented in the README
+    import argparse
+
+    parser = argparse.ArgumentParser(description="camera_shield display capture")
+    parser.add_argument("--config", default="config.yaml")
+    app = Application(parser.parse_args().config)
+    app.run()
diff --git a/scripts/pylib/display-twister-harness/camera_shield/plugins/__init__.py b/scripts/pylib/display-twister-harness/camera_shield/plugins/__init__.py
new file mode 100644
index 0000000000000..7172ccebf93ba
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/plugins/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/scripts/pylib/display-twister-harness/camera_shield/plugins/signature_plugin.py b/scripts/pylib/display-twister-harness/camera_shield/plugins/signature_plugin.py
new file mode 100644
index 0000000000000..8ed152c394c95
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/plugins/signature_plugin.py
@@ -0,0 +1,533 @@
+# Copyright (c) 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import hashlib
+import os
+import pickle
+from datetime import datetime
+
+import cv2
+import numpy as np
+
+from camera_shield.uvc_core.plugin_base import DetectionPlugin
+
+
+class VideoSignaturePlugin(DetectionPlugin):
+    """Plugin for generating and comparing video frame signatures and fingerprints"""
+
+    def __init__(self, name, config):
+        super().__init__(name, config)
+        self.fingerprint_cache = []  # Store video fingerprints
+        self.fingerprint 
= { + "method": config.get("method", "combined"), + "frame_signatures": [], + "metadata": config.get("metadata", {}), + } + self.frame_count = 0 + self.operations = config.get("operations", "compare") + self.saved = False + self.result = [] + + def initialize(self): + """Initialize detection resources""" + print("initialize") + if self.operations == "compare": + print("compare") + directory = self.config.get("directory", "./fingerprints") + if os.path.isdir(directory): + self.load_all_fingerprints(directory) + else: + print(f"{directory} not exist") + + def generate_frame_signature(self, frame, method="combined") -> dict: + """Generate a robust signature for a given frame using multiple techniques + + Args: + frame: The input frame + method: Signature method ('phash', 'dhash', 'histogram', 'combined') + + Returns: + A dictionary containing signature data + """ + if frame is None: + return None + + # Convert to grayscale for consistent processing + if len(frame.shape) == 3: + gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + else: + gray = frame.copy() + + # Resize to standard size for consistent signatures + resized = cv2.resize(gray, (64, 64)) + + signature = {} + + # Perceptual Hash (pHash) + if method in ["phash", "combined"]: + # DCT transform + dct = cv2.dct(np.float32(resized)) + # Keep only the top-left 8x8 coefficients + dct_low = dct[:8, :8] + # Compute median value + med = np.median(dct_low) + # Convert to binary hash + phash = (dct_low > med).flatten().astype(int) + signature["phash"] = phash.tolist() + # Generate a compact hex representation + phash_hex = "".join( + [ + hex(int("".join(map(str, phash[i : i + 4])), 2))[2:] + for i in range(0, len(phash), 4) + ] + ) + signature["phash_hex"] = phash_hex + + # Difference Hash (dHash) + if method in ["dhash", "combined"]: + # Resize to 9x8 (one pixel larger horizontally) + resized_dhash = cv2.resize(gray, (9, 8)) + # Compute differences horizontally + diff = resized_dhash[:, 1:] > resized_dhash[:, :-1] + # Flatten to 1D array + dhash = diff.flatten().astype(int) + signature["dhash"] = dhash.tolist() + # Generate a compact hex representation + dhash_hex = "".join( + [ + hex(int("".join(map(str, dhash[i : i + 4])), 2))[2:] + for i in range(0, len(dhash), 4) + ] + ) + signature["dhash_hex"] = dhash_hex + + # Color histogram features + if method in ["histogram", "combined"]: + if len(frame.shape) == 3: # Color image + hist_features = [] + for i in range(3): # For each color channel + hist = cv2.calcHist([frame], [i], None, [16], [0, 256]) + hist = cv2.normalize(hist, hist).flatten() + hist_features.extend(hist) + signature["color_hist"] = [float(x) for x in hist_features] + else: # Grayscale + hist = cv2.calcHist([gray], [0], None, [32], [0, 256]) + hist = cv2.normalize(hist, hist).flatten() + signature["gray_hist"] = [float(x) for x in hist] + + # Edge features + if method in ["combined"]: + # Canny edge detection + edges = cv2.Canny(resized, 100, 200) + # Count percentage of edge pixels + edge_ratio = np.count_nonzero(edges) / edges.size + signature["edge_ratio"] = float(edge_ratio) + + # Compute gradient magnitude histogram + sobelx = cv2.Sobel(resized, cv2.CV_64F, 1, 0, ksize=3) + sobely = cv2.Sobel(resized, cv2.CV_64F, 0, 1, ksize=3) + magnitude = np.sqrt(sobelx**2 + sobely**2) + mag_hist = np.histogram(magnitude, bins=8, range=(0, 255))[0] + mag_hist = mag_hist / np.sum(mag_hist) # Normalize + signature["gradient_hist"] = [float(x) for x in mag_hist] + + # Generate a unique hash for the entire signature + signature_str = 
str(signature) + signature["hash"] = hashlib.sha256(signature_str.encode()).hexdigest() + + return signature + + def compare_signatures(self, sig1, sig2, method="combined") -> float: + """Compare two frame signatures and return similarity score (0-1) + + Args: + sig1: First signature dictionary + sig2: Second signature dictionary + method: Comparison method matching the signature generation method + + Returns: + Similarity score between 0 (different) and 1 (identical) + """ + if sig1 is None or sig2 is None: + return 0.0 + + scores = [] + + # Compare perceptual hashes (Hamming distance) + if method in ["phash", "combined"] and "phash" in sig1 and "phash" in sig2: + phash1 = np.array(sig1["phash"]) + phash2 = np.array(sig2["phash"]) + hamming_dist = np.sum(phash1 != phash2) + max_dist = len(phash1) + phash_score = 1.0 - (hamming_dist / max_dist) + scores.append(phash_score) + + # Compare difference hashes + if method in ["dhash", "combined"] and "dhash" in sig1 and "dhash" in sig2: + dhash1 = np.array(sig1["dhash"]) + dhash2 = np.array(sig2["dhash"]) + hamming_dist = np.sum(dhash1 != dhash2) + max_dist = len(dhash1) + dhash_score = 1.0 - (hamming_dist / max_dist) + scores.append(dhash_score) + + # Compare color histograms + if method in ["histogram", "combined"]: + if "color_hist" in sig1 and "color_hist" in sig2: + hist1 = np.array(sig1["color_hist"]) + hist2 = np.array(sig2["color_hist"]) + # Bhattacharyya distance for histograms + hist_score = cv2.compareHist( + hist1.astype(np.float32), + hist2.astype(np.float32), + cv2.HISTCMP_BHATTACHARYYA, + ) + # Convert to similarity score (0-1) + hist_score = 1.0 - min(hist_score, 1.0) + scores.append(hist_score) + elif "gray_hist" in sig1 and "gray_hist" in sig2: + hist1 = np.array(sig1["gray_hist"]) + hist2 = np.array(sig2["gray_hist"]) + hist_score = cv2.compareHist( + hist1.astype(np.float32), + hist2.astype(np.float32), + cv2.HISTCMP_BHATTACHARYYA, + ) + hist_score = 1.0 - min(hist_score, 1.0) + scores.append(hist_score) + + # Compare edge features + if method in ["combined"] and "edge_ratio" in sig1 and "edge_ratio" in sig2: + edge_diff = abs(sig1["edge_ratio"] - sig2["edge_ratio"]) + edge_score = 1.0 - min(edge_diff, 1.0) + scores.append(edge_score) + + # Compare gradient histograms + if method in ["combined"] and "gradient_hist" in sig1 and "gradient_hist" in sig2: + grad1 = np.array(sig1["gradient_hist"]) + grad2 = np.array(sig2["gradient_hist"]) + grad_score = cv2.compareHist( + grad1.astype(np.float32), + grad2.astype(np.float32), + cv2.HISTCMP_BHATTACHARYYA, + ) + grad_score = 1.0 - min(grad_score, 1.0) + scores.append(grad_score) + + # If no scores were calculated, return 0 + if not scores: + return 0.0 + + # Weight the scores based on reliability (can be adjusted) + weights = { + "phash": self.config.get("phash_weight", 0.35), + "dhash": self.config.get("dhash_weight", 0.25), + "histogram": self.config.get("histogram_weight", 0.2), + "edge_ratio": self.config.get("edge_ratio_weight", 0.1), + "gradient_hist": self.config.get("gradient_hist_weight", 0.1), + } + + # For combined method, use weighted average + if method == "combined": + final_score = 0.0 + total_weight = 0.0 + + if "phash" in sig1 and "phash" in sig2: + final_score += scores[0] * weights["phash"] + total_weight += weights["phash"] + + if "dhash" in sig1 and "dhash" in sig2: + final_score += scores[1] * weights["dhash"] + total_weight += weights["dhash"] + + if ("color_hist" in sig1 and "color_hist" in sig2) or ( + "gray_hist" in sig1 and "gray_hist" in sig2 + ): + 
final_score += scores[2] * weights["histogram"] + total_weight += weights["histogram"] + + if "edge_ratio" in sig1 and "edge_ratio" in sig2: + final_score += scores[3] * weights["edge_ratio"] + total_weight += weights["edge_ratio"] + + if "gradient_hist" in sig1 and "gradient_hist" in sig2: + final_score += scores[4] * weights["gradient_hist"] + total_weight += weights["gradient_hist"] + + if total_weight > 0: + return final_score / total_weight + else: + return 0.0 + else: + # For single methods, return the calculated score + return scores[0] + + def generate_video_fingerprint(self, frame, method="combined") -> dict: + """Generate a fingerprint from a list of video frames + + Args: + frames: List of video frames + device_id: Camera device ID + method: Signature method to use + + Returns: + Dictionary containing video fingerprint data + """ + + if self.frame_count < self.config.get("duration", 100): + # Generate signature for this frame + signature = self.generate_frame_signature(frame, method) + if signature: + self.fingerprint["frame_signatures"].append(signature) + + self.frame_count += 1 + + return self.frame_count + + def save_fingerprint(self, directory="fingerprints"): + """Save a video fingerprint to disk + + Args: + fingerprint: The fingerprint dictionary to save + directory: Directory to save fingerprints in + + Returns: + Path to the saved fingerprint file + """ + + if not self.fingerprint["frame_signatures"]: + return "" + + if self.frame_count < self.config.get("duration", 100): + return "" + + if self.saved: + return "" + + # Create directory if it doesn't exist + os.makedirs(directory, exist_ok=True) + + # Create filename based on fingerprint ID and timestamp + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"fingerprint_{timestamp}.pkl" + filepath = os.path.join(directory, filename) + + # Save fingerprint using pickle + with open(filepath, "wb") as f: + pickle.dump(self.fingerprint, f) + + self.saved = True + + return filepath + + def load_fingerprint(self, filepath): + """Load a fingerprint from disk + + Args: + filepath: Path to the fingerprint file + + Returns: + The loaded fingerprint dictionary or None if loading fails + """ + try: + with open(filepath, "rb") as f: + fingerprint = pickle.load(f) + + return fingerprint + except Exception as e: + print(f"Error loading fingerprint from {filepath}: {str(e)}") + return None + + def compare_video_fingerprints(self, fp1, fp2) -> dict: + """Compare two video fingerprints and return similarity metrics + + Args: + fp1: First fingerprint dictionary + fp2: Second fingerprint dictionary + + Returns: + Dictionary with similarity metrics + """ + if fp1 is None or fp2 is None: + return {"overall_similarity": 0.0, "error": "Invalid fingerprints"} + + # Check if fingerprints use the same method + if fp1.get("method") != fp2.get("method"): + print( + f"Warning: Comparing fingerprints with different methods:\ + {fp1.get('method')} vs {fp2.get('method')}" + ) + + method = fp1.get("method", "combined") + + # Get frame signatures + sigs1 = fp1.get("frame_signatures", []) + sigs2 = fp2.get("frame_signatures", []) + + if not sigs1 or not sigs2: + return {"overall_similarity": 0.0, "error": "Empty frame signatures"} + + # Calculate frame-by-frame similarities + frame_similarities = [] + + # Use dynamic time warping approach for different length fingerprints + if len(sigs1) != len(sigs2): + # Create similarity matrix + sim_matrix = np.zeros((len(sigs1), len(sigs2))) + + for i, sig1 in enumerate(sigs1): + for j, sig2 in 
enumerate(sigs2):
+                    sim_matrix[i, j] = self.compare_signatures(sig1, sig2, method)
+
+            # Find optimal path through similarity matrix (simplified DTW)
+            path_similarities = []
+
+            # For each frame in the shorter fingerprint, find best match in longer one
+            if len(sigs1) <= len(sigs2):
+                for i in range(len(sigs1)):
+                    best_match = np.max(sim_matrix[i, :])
+                    path_similarities.append(best_match)
+            else:
+                for j in range(len(sigs2)):
+                    best_match = np.max(sim_matrix[:, j])
+                    path_similarities.append(best_match)
+
+            frame_similarities = path_similarities
+        else:
+            # Direct frame-by-frame comparison for same length fingerprints
+            for sig1, sig2 in zip(sigs1, sigs2, strict=False):
+                similarity = self.compare_signatures(sig1, sig2, method)
+                frame_similarities.append(similarity)
+
+        # Calculate overall metrics
+        overall_similarity = np.mean(frame_similarities) if frame_similarities else 0.0
+        min_similarity = np.min(frame_similarities) if frame_similarities else 0.0
+        max_similarity = np.max(frame_similarities) if frame_similarities else 0.0
+
+        # Calculate temporal consistency (how consistent the similarity is across frames)
+        temporal_consistency = 1.0 - np.std(frame_similarities) if frame_similarities else 0.0
+
+        return {
+            "overall_similarity": float(overall_similarity),
+            "min_similarity": float(min_similarity),
+            "max_similarity": float(max_similarity),
+            "temporal_consistency": float(temporal_consistency),
+            "frame_similarities": [float(s) for s in frame_similarities],
+            "name": fp2["metadata"]["name"],
+            "metadata": fp2["metadata"],
+        }
+
+    def identify_video(self, threshold=0.85) -> list[dict]:
+        """Identify the current video fingerprint against stored fingerprints
+
+        Args:
+            threshold: Minimum similarity threshold for a match
+
+        Returns:
+            List of matching fingerprints with similarity scores
+        """
+        matches = []
+
+        # Compare against all cached fingerprints
+        for fingerprint in self.fingerprint_cache:
+            comparison = self.compare_video_fingerprints(self.fingerprint, fingerprint)
+
+            if comparison["overall_similarity"] >= threshold:
+                matches.append(
+                    {
+                        "similarity": comparison["overall_similarity"],
+                        "details": comparison,
+                    }
+                )
+
+        # Sort matches by similarity (highest first)
+        matches.sort(key=lambda x: x["similarity"], reverse=True)
+
+        return matches
+
+    def load_all_fingerprints(self, directory="fingerprints"):
+        """Load all fingerprints from a directory into cache
+
+        Args:
+            directory: Directory containing fingerprint files
+
+        Returns:
+            Number of fingerprints loaded
+        """
+        if not os.path.exists(directory):
+            return 0
+
+        count = 0
+        for filename in os.listdir(directory):
+            if filename.endswith(".pkl"):
+                filepath = os.path.join(directory, filename)
+                fingerprint = self.load_fingerprint(filepath)
+                if fingerprint:
+                    self.fingerprint_cache.append(fingerprint)
+                    count += 1
+
+        return count
+
+    def process_frame(self, frame):
+        method = self.config.get("method", "combined")
+        count = self.generate_video_fingerprint(frame, method)
+        return {
+            "frame_count": count,
+            "frame": frame,
+        }
+
+    def handle_results(self, result, frame):
+        matched = []
+        operations = self.config.get("operations", "compare")
+        if result["frame_count"] == self.config.get("duration", 100):
+            if operations == "compare":
+                matched = self.identify_video(self.config.get("threshold", 0.85))
+            elif operations == "generate":
+                # Default must match the "./fingerprints" directory used elsewhere
+                self.save_fingerprint(self.config.get("directory", "./fingerprints"))
+            else:
+                print("unsupported operation")
+
+            if matched:
+                if 
matched[0]["details"]["name"] not in self.result:
+                    self.result.append(matched[0]["details"]["name"])
+                    cv2.putText(
+                        frame,
+                        f"match with {matched[0]['details']['name']} :{matched[0]['similarity']:.2f}",
+                        (150, frame.shape[0] - 90),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+            else:
+                cv2.putText(
+                    frame,
+                    "signature done",
+                    (150, frame.shape[0] - 90),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    0.7,
+                    (0, 0, 255),
+                    2,
+                )
+        else:
+            cv2.putText(
+                frame,
+                "signature in progress",
+                (150, frame.shape[0] - 90),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 0, 255),
+                2,
+            )
+
+    def shutdown(self) -> list:
+        """Release plugin resources"""
+        if self.config.get("operations", "compare") == "compare":
+            if self.result:
+                print(f"{self.__class__.__name__} result: {self.result}\n")
+            else:
+                print(f"{self.__class__.__name__} result: no match\n")
+
+        return self.result
diff --git a/scripts/pylib/display-twister-harness/camera_shield/requirements.txt b/scripts/pylib/display-twister-harness/camera_shield/requirements.txt
new file mode 100644
index 0000000000000..78f46d5f740a3
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/requirements.txt
@@ -0,0 +1,3 @@
+opencv-python==4.9.0.80
+numpy==1.26.4
+pyyaml
diff --git a/scripts/pylib/display-twister-harness/camera_shield/uvc_core/__init__.py b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/__init__.py
new file mode 100644
index 0000000000000..7172ccebf93ba
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/scripts/pylib/display-twister-harness/camera_shield/uvc_core/camera_controller.py b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/camera_controller.py
new file mode 100644
index 0000000000000..5074ea03ae194
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/camera_controller.py
@@ -0,0 +1,272 @@
+# Copyright (c) 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import cv2
+import numpy as np
+
+
+class UVCCamera:
+    def __init__(self, config):
+        self.device_id = config.get("device_id", 0)
+        self.res_x = config.get("res_x", 1280)
+        self.res_y = config.get("res_y", 720)
+        self.cap = cv2.VideoCapture(self.device_id)
+        self.prev_frame = None
+        self.current_alarms = 0
+        self.alarm_duration = 5  # Alarm duration in frames
+        self.fingerprint_cache = {}  # Store video fingerprints
+        self._original_wb = 0
+
+    def initialize(self):
+        if not self.cap.isOpened():
+            raise Exception(f"Failed to open camera {self.device_id}")
+        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.res_x)  # Set resolution
+        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.res_y)
+        self.cap.set(cv2.CAP_PROP_AUTOFOCUS, 1)
+        # Read initial 10 frames to stabilize camera
+        for _ in range(10):
+            self.get_frame()
+
+        # Use first 120 frames to optimize focus
+        best_focus_score = 0
+        for i in range(120):
+            ret, frame = self.get_frame()
+            if ret:
+                current_score = self.smart_focus(frame)
+                if current_score > best_focus_score:
+                    best_focus_score = current_score
+                if i % 20 == 0:  # Periodically adjust focus
+                    self.auto_focus(current_score < 0.5)
+        self.auto_white_balance(True)
+        self.print_settings()
+
+    def get_frame(self):
+        ret, frame = self.cap.read()
+        if ret:
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        return ret, frame
+
+    def show_frame(self, frame):
+        cv2.imshow(f"Camera Preview (Device {self.device_id})", frame)
+        cv2.waitKey(1)
+
+    def auto_focus(self, enable: bool):
+        """Auto focus control
+
+        Args:
+            enable: True to enable auto focus, False to disable
+        """
+        self.cap.set(cv2.CAP_PROP_AUTOFOCUS, 1 if enable else 0)
+
+    def smart_focus(self, frame):
+        """Smart focus algorithm based on image sharpness
+
+        Args:
+            frame: Current video frame
+
+        Returns:
+            Focus score value (0-1)
+        """
+        if frame is None:
+            return 0
+
+        # Convert to grayscale
+        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+
+        # Calculate Laplacian variance as sharpness metric
+        laplacian = cv2.Laplacian(gray, cv2.CV_64F)
+        focus_score = laplacian.var() / 1000  # Scale roughly into a 0-1 range
+
+        # Adjust focus based on sharpness
+        if focus_score < 0.3:  # Image is blurry
+            self.auto_focus(True)
+        elif focus_score > 0.7:  # Image is sharp
+            self.auto_focus(False)
+
+        return min(max(focus_score, 0), 1)  # Ensure value is within 0-1 range
+
+    def auto_white_balance(self, enable):
+        """Enable/disable auto white balance with algorithm optimization"""
+        if enable:
+            self._original_wb = self.cap.get(cv2.CAP_PROP_WB_TEMPERATURE)
+
+            self.cap.set(cv2.CAP_PROP_AUTO_WB, 1)
+
+            # Sample frames and analyze white balance
+            wb_scores = []
+            for _ in range(10):  # Sample 10 frames for better accuracy
+                ret, frame = self.cap.read()
+                if not ret:
+                    break
+
+                # Convert to LAB color space and analyze A/B channels
+                lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
+                a_channel, b_channel = lab[:, :, 1], lab[:, :, 2]
+
+                # Calculate white balance score (lower is better)
+                wb_score = np.std(a_channel) + np.std(b_channel)
+                wb_scores.append(wb_score)
+
+                # If score improves significantly, keep current WB
+                if len(wb_scores) > 1 and wb_score < min(wb_scores[:-1]) * 0.9:
+                    break
+        else:
+            if hasattr(self, "_original_wb"):
+                self.cap.set(cv2.CAP_PROP_AUTO_WB, 0)
+                self.cap.set(cv2.CAP_PROP_WB_TEMPERATURE, self._original_wb)
+        ret, frame = self.cap.read()
+        if ret:
+            cv2.waitKey(1)
+
+    def capture_image(self, save_path=None):
+        """Capture the current frame and optionally save it to a file"""
+        # Re-apply the current resolution settings before capturing
+        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        ret, frame = self.cap.read()
+        if not ret:
+            return None
+
+        if save_path:
+            cv2.imwrite(save_path, frame)
+        return frame
+
+    def analyze_image(self, image):
+        """Perform basic image analysis on captured frame"""
+        if image is None:
+            return None
+
+        analysis = {}
+        # Brightness analysis
+        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        analysis["brightness"] = np.mean(gray)
+
+        # Color analysis
+        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
+        analysis["color_balance"] = {
+            "a_channel": np.mean(lab[:, :, 1]),
+            "b_channel": np.mean(lab[:, :, 2]),
+        }
+
+        # Sharpness analysis
+        analysis["sharpness"] = cv2.Laplacian(gray, cv2.CV_64F).var()
+
+        return analysis
+
+    def show_analysis(self, analysis, frame=None):
+        """Display image analysis results on frame if provided"""
+        if analysis is None:
+            print("No analysis results to display")
+            return
+
+        # Print analysis results
+        print("Image Analysis Results:")
+        print(f"  Brightness: {analysis['brightness']:.2f}")
+        print(f"  Color Balance - A Channel: {analysis['color_balance']['a_channel']:.2f}")
+        print(f"  Color Balance - B Channel: {analysis['color_balance']['b_channel']:.2f}")
+        print(f"  Sharpness: {analysis['sharpness']:.2f}")
+
+        # Display the analysis results on the frame
+        if frame is not None:
+            text_y = 30
+            cv2.putText(
+                frame,
+                f"Brightness: {analysis['brightness']:.2f}",
+                (10, text_y),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 255, 0),
+                2,
+            )
+            text_y += 30
+            cv2.putText(
+                frame,
+                f"A Channel: {analysis['color_balance']['a_channel']:.2f}",
+                (10, text_y),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 255, 0),
+                2,
+            )
+            text_y += 30
+            cv2.putText(
+                frame,
+                f"B Channel: {analysis['color_balance']['b_channel']:.2f}",
+                (10, text_y),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 255, 0),
+                2,
+            )
+            text_y += 30
+            cv2.putText(
+                frame,
+                f"Sharpness: {analysis['sharpness']:.2f}",
+                (10, text_y),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 255, 0),
+                2,
+            )
+
+    def load_image(self, file_path):
+        """Load a saved image from file
+
+        Args:
+            file_path: Path to the image file
+
+        Returns:
+            The loaded image in BGR format, or None if loading fails
+        """
+        try:
+            image = cv2.imread(file_path)
+            if image is None:
+                print(f"Failed to load image from {file_path}")
+            return image
+        except Exception as e:
+            print(f"Error loading image: {str(e)}")
+            return None
+
+    def analyze_multiple_frames(self, num_frames=10):
+        """Analyze multiple frames and return average results"""
+        results = []
+
+        for _ in range(num_frames):
+            frame = self.capture_image()
+            if frame is None:
+                continue
+
+            analysis = self.analyze_image(frame)
+            results.append(analysis)
+
+        if not results:
+            return None
+
+        # Calculate averages
+        avg_analysis = {
+            "brightness": np.mean([r["brightness"] for r in results]),
+            "color_balance": {
+                "a_channel": np.mean([r["color_balance"]["a_channel"] for r in results]),
+                "b_channel": np.mean([r["color_balance"]["b_channel"] for r in results]),
+            },
+            "sharpness": np.mean([r["sharpness"] for r in results]),
+        }
+
+        return avg_analysis
+
+    def print_settings(self):
+        """Print current camera settings"""
+        print(f"Camera {self.device_id} Settings:")
+        print(
+            f"  Resolution: {int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))}"
+            f"x{int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))}"
+        )
+        print(f"  FPS: {self.cap.get(cv2.CAP_PROP_FPS):.2f}")
+        print(f"  Auto Focus: {'ON' if self.cap.get(cv2.CAP_PROP_AUTOFOCUS) else 'OFF'}")
+        print(f"  Auto White Balance: {'ON' if self.cap.get(cv2.CAP_PROP_AUTO_WB) else 'OFF'}")
+
+    def release(self):
+        self.cap.release()
+        cv2.destroyAllWindows()
diff --git a/scripts/pylib/display-twister-harness/camera_shield/uvc_core/plugin_base.py b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/plugin_base.py
new file mode 100644
index 0000000000000..ed1e50e9fd191
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/camera_shield/uvc_core/plugin_base.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC, abstractmethod
+
+
+class DetectionPlugin(ABC):
+    def __init__(self, name, config: dict):
+        self.name = name
+        self.config = config
+        self.update_count = 0
+        self.old_result = {}
+
+    @abstractmethod
+    def initialize(self):
+        """initialize"""
+
+    @abstractmethod
+    def process_frame(self, frame) -> dict:
+        """process_frame"""
+
+    @abstractmethod
+    def handle_results(self, result, frame):
+        """handle_results"""
+
+    @abstractmethod
+    def shutdown(self) -> list:
+        """release resources"""
+
+
+class PluginManager:
+    def __init__(self):
+        self.plugins = {}
+
+    def register_plugin(self, name: str, plugin_class):
+        self.plugins[name] = plugin_class
+
+    def create_plugin(self, name: str, config: dict) -> DetectionPlugin:
+        # Plugins take (name, config) in their constructor; pass the name through
+        return self.plugins[name](name, config)
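
New plugins implement the DetectionPlugin interface above and are wired in
through the plugins list in config.yaml. A minimal custom plugin sketch (the
class name and the returned strings are made-up examples):

```python
import numpy as np

from camera_shield.uvc_core.plugin_base import DetectionPlugin


class BrightnessPlugin(DetectionPlugin):
    """Example-only plugin: tracks average frame brightness."""

    def initialize(self):
        self.samples = []

    def process_frame(self, frame) -> dict:
        # Called once per captured frame; the dict is passed to handle_results()
        return {"brightness": float(np.mean(frame))}

    def handle_results(self, result, frame):
        self.samples.append(result["brightness"])

    def shutdown(self) -> list:
        # Application.shutdown() extends its results list with this return value
        if not self.samples:
            return []
        return [f"mean brightness: {sum(self.samples) / len(self.samples):.1f}"]
```
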
diff --git a/scripts/pylib/display-twister-harness/conftest.py b/scripts/pylib/display-twister-harness/conftest.py
new file mode 100644
index 0000000000000..146b0f30a4da7
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/conftest.py
@@ -0,0 +1,7 @@
+# Copyright 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+def pytest_addoption(parser):
+    parser.addoption('--config')
diff --git a/scripts/pylib/display-twister-harness/test_display.py b/scripts/pylib/display-twister-harness/test_display.py
new file mode 100644
index 0000000000000..c3cfafedf008c
--- /dev/null
+++ b/scripts/pylib/display-twister-harness/test_display.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2025 NXP
+#
+# SPDX-License-Identifier: Apache-2.0
+import logging
+
+import pytest
+import yaml
+from camera_shield.main import Application
+from twister_harness import DeviceAdapter
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture
+def test_config(request):
+    return request.config.getoption("--config")
+
+
+def get_prompt(config):
+    '''
+    get_prompt: return the test section (prompt, timeout, expect) of the config
+    '''
+    with open(config) as yaml_file:
+        data = yaml.safe_load(yaml_file)
+
+    return data.get('test', {})
+
+
+def test_display_harness(dut: DeviceAdapter, test_config):
+    if not test_config:
+        pytest.skip('test_config not provided')
+
+    testcase_config = get_prompt(test_config)
+    assert testcase_config != {}, "test config does not exist"
+
+    dut.readlines_until(
+        regex=testcase_config.get("prompt", 'uart:~$'),
+        timeout=testcase_config.get("timeout", 30),
+        print_output=True,
+    )
+    app = Application(test_config)
+    result = app.run()
+    logger.info(result)
+    assert sorted(testcase_config.get("expect", ['PASS'])) == sorted(result)
diff --git a/scripts/pylib/twister/twisterlib/harness.py b/scripts/pylib/twister/twisterlib/harness.py
index 16d154a88fad4..0e9d27cf0ddd4 100644
--- a/scripts/pylib/twister/twisterlib/harness.py
+++ b/scripts/pylib/twister/twisterlib/harness.py
@@ -629,6 +629,27 @@ def _parse_report_file(self, report):
                 self.status = TwisterStatus.SKIP
                 self.instance.reason = 'No tests collected'
+
+class Display_capture(Pytest):
+    def generate_command(self):
+        config = self.instance.testsuite.harness_config
+        pytest_root = [os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'display-twister-harness')]
+        config['pytest_root'] = pytest_root
+
+        command = super().generate_command()
+        if test_config_file := self._get_display_config_file(config):
+            command.append(f'--config={test_config_file}')
+        else:
+            logger.warning('No config file provided')
+        return command
+
+    def _get_display_config_file(self, harness_config):
+        if test_config_file := harness_config.get('display_capture_config'):
+            test_config_path = os.path.join(self.source_dir, test_config_file)
+            logger.info(f'test_config_path = {test_config_path}')
+            if os.path.exists(test_config_path):
+                return test_config_path
+        return None
+
 class Shell(Pytest):
     def generate_command(self):
diff --git a/scripts/pylib/twister/twisterlib/testinstance.py b/scripts/pylib/twister/twisterlib/testinstance.py
index 53e17b338ab82..793b6bbdc750a 100644
--- a/scripts/pylib/twister/twisterlib/testinstance.py
+++ b/scripts/pylib/twister/twisterlib/testinstance.py
@@ -222,6 +222,7 @@ def testsuite_runnable(testsuite, fixtures):
     # console harness allows us to run the test and capture data.
if testsuite.harness in [ 'console', + 'display_capture', 'ztest', 'pytest', 'power', @@ -316,7 +317,7 @@ def check_runnable(self, device_testing) # check if test is runnable in pytest - if self.testsuite.harness in ['pytest', 'shell', 'power']: + if self.testsuite.harness in ['pytest', 'shell', 'power', 'display_capture']: target_ready = bool( filter == 'runnable' or simulator and simulator.name in SUPPORTED_SIMS_IN_PYTEST ) diff --git a/scripts/requirements-run-test.txt b/scripts/requirements-run-test.txt index 616e352faa3cf..6c3f3d93cf8d9 100644 --- a/scripts/requirements-run-test.txt +++ b/scripts/requirements-run-test.txt @@ -20,3 +20,7 @@ python-can>=4.3.0 # used for SPDX files validation spdx-tools + +# for display test +opencv-python>=4.9.0.80 +numpy>=1.26.4 diff --git a/scripts/schemas/twister/testsuite-schema.yaml b/scripts/schemas/twister/testsuite-schema.yaml index 26bda49053da9..ad6691c1327b3 100644 --- a/scripts/schemas/twister/testsuite-schema.yaml +++ b/scripts/schemas/twister/testsuite-schema.yaml @@ -127,6 +127,9 @@ schema;scenario-schema: required: true "expected": type: str + "display_capture_config": + type: str + required: false "type": type: str required: false diff --git a/tests/drivers/display/display_check/CMakeLists.txt b/tests/drivers/display/display_check/CMakeLists.txt new file mode 100644 index 0000000000000..88dd476d197c0 --- /dev/null +++ b/tests/drivers/display/display_check/CMakeLists.txt @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(display) + +target_sources(app PRIVATE src/main.c) diff --git a/tests/drivers/display/display_check/boards/ek_ra8d1.conf b/tests/drivers/display/display_check/boards/ek_ra8d1.conf new file mode 100644 index 0000000000000..0e0a8cb0f908a --- /dev/null +++ b/tests/drivers/display/display_check/boards/ek_ra8d1.conf @@ -0,0 +1,4 @@ +# Copyright (c) 2024 Renesas Electronics Corporation +# SPDX-License-Identifier: Apache-2.0 + +CONFIG_HEAP_MEM_POOL_SIZE=90440 diff --git a/tests/drivers/display/display_check/boards/frdm_rw612.conf b/tests/drivers/display/display_check/boards/frdm_rw612.conf new file mode 100644 index 0000000000000..b0995a93b88dd --- /dev/null +++ b/tests/drivers/display/display_check/boards/frdm_rw612.conf @@ -0,0 +1,6 @@ +# +# Copyright 2025 NXP +# +# SPDX-License-Identifier: Apache-2.0 +# +CONFIG_PM=y diff --git a/tests/drivers/display/display_check/boards/frdm_rw612.overlay b/tests/drivers/display/display_check/boards/frdm_rw612.overlay new file mode 100644 index 0000000000000..65821c8b3eb56 --- /dev/null +++ b/tests/drivers/display/display_check/boards/frdm_rw612.overlay @@ -0,0 +1,9 @@ +/* + * Copyright 2025 NXP + * + * SPDX-License-Identifier: Apache-2.0 + */ + +&standby { + status = "okay"; +}; diff --git a/tests/drivers/display/display_check/boards/mimxrt1170_evk_mimxrt1176_cm7.conf b/tests/drivers/display/display_check/boards/mimxrt1170_evk_mimxrt1176_cm7.conf new file mode 100644 index 0000000000000..af2acc5acf0de --- /dev/null +++ b/tests/drivers/display/display_check/boards/mimxrt1170_evk_mimxrt1176_cm7.conf @@ -0,0 +1,10 @@ +# +# Copyright 2022, NXP +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Sample will allocate buffer equal to: (panelwidth / 8) * (panelwidth / 4) * pixel depth. For a +# 1280x720 display in a 32-bpp format (e.g. ARGB8888), this is (720 / 8) * (720 / 4) * 4 = 64800 +# bytes. 
We include 128 bytes of padding for kernel heap structures +CONFIG_HEAP_MEM_POOL_SIZE=64928 diff --git a/tests/drivers/display/display_check/boards/mimxrt1170_evkb_cm7.conf b/tests/drivers/display/display_check/boards/mimxrt1170_evkb_cm7.conf new file mode 100644 index 0000000000000..c8e665357ca37 --- /dev/null +++ b/tests/drivers/display/display_check/boards/mimxrt1170_evkb_cm7.conf @@ -0,0 +1,10 @@ +# +# Copyright 2023, NXP +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Sample will allocate buffer equal to: (panelwidth / 8) * (panelwidth / 4) * pixel depth. For a +# 1280x720 display in a 32-bpp format (e.g. ARGB8888), this is (720 / 8) * (720 / 4) * 4 = 64800 +# bytes. We include 128 bytes of padding for kernel heap structures +CONFIG_HEAP_MEM_POOL_SIZE=64928 diff --git a/tests/drivers/display/display_check/boards/mimxrt595_evk_mimxrt595s_cm33.conf b/tests/drivers/display/display_check/boards/mimxrt595_evk_mimxrt595s_cm33.conf new file mode 100644 index 0000000000000..c8e665357ca37 --- /dev/null +++ b/tests/drivers/display/display_check/boards/mimxrt595_evk_mimxrt595s_cm33.conf @@ -0,0 +1,10 @@ +# +# Copyright 2023, NXP +# +# SPDX-License-Identifier: Apache-2.0 +# + +# Sample will allocate buffer equal to: (panelwidth / 8) * (panelwidth / 4) * pixel depth. For a +# 1280x720 display in a 32-bpp format (e.g. ARGB8888), this is (720 / 8) * (720 / 4) * 4 = 64800 +# bytes. We include 128 bytes of padding for kernel heap structures +CONFIG_HEAP_MEM_POOL_SIZE=64928 diff --git a/tests/drivers/display/display_check/boards/stm32n6570_dk.conf b/tests/drivers/display/display_check/boards/stm32n6570_dk.conf new file mode 100644 index 0000000000000..89de7acad83c5 --- /dev/null +++ b/tests/drivers/display/display_check/boards/stm32n6570_dk.conf @@ -0,0 +1,6 @@ +# Copyright (c) 2025 ST Microelectronics +# SPDX-License-Identifier: Apache-2.0 + +# On STM32N6 DK board, display being larger, it requires +# more memory to draw squares on the display (40K) +CONFIG_HEAP_MEM_POOL_SIZE=65536 diff --git a/tests/drivers/display/display_check/boards/stm32n6570_dk_stm32n657xx_sb.conf b/tests/drivers/display/display_check/boards/stm32n6570_dk_stm32n657xx_sb.conf new file mode 100644 index 0000000000000..89de7acad83c5 --- /dev/null +++ b/tests/drivers/display/display_check/boards/stm32n6570_dk_stm32n657xx_sb.conf @@ -0,0 +1,6 @@ +# Copyright (c) 2025 ST Microelectronics +# SPDX-License-Identifier: Apache-2.0 + +# On STM32N6 DK board, display being larger, it requires +# more memory to draw squares on the display (40K) +CONFIG_HEAP_MEM_POOL_SIZE=65536 diff --git a/tests/drivers/display/display_check/boards/stm32u5g9j_dk2.conf b/tests/drivers/display/display_check/boards/stm32u5g9j_dk2.conf new file mode 100644 index 0000000000000..7897a882952c1 --- /dev/null +++ b/tests/drivers/display/display_check/boards/stm32u5g9j_dk2.conf @@ -0,0 +1,10 @@ +# +# Copyright (c) 2025 STMicroelectronics +# +# SPDX-License-Identifier: Apache-2.0 +# + +CONFIG_HEAP_MEM_POOL_SIZE=172840 +CONFIG_IDLE_STACK_SIZE=8192 +CONFIG_PRIVILEGED_STACK_SIZE=8192 +CONFIG_MAIN_STACK_SIZE=4096 diff --git a/tests/drivers/display/display_check/display_config.yaml b/tests/drivers/display/display_check/display_config.yaml new file mode 100644 index 0000000000000..ba48194ee742a --- /dev/null +++ b/tests/drivers/display/display_check/display_config.yaml @@ -0,0 +1,22 @@ +case_config: {device_id: 0, fps: 30, res_y: 720, res_x: 1280, run_time: 20} +plugins: +- class: VideoSignaturePlugin + config: + dhash_weight: 0.25 + directory: 
${DISPLAY_TEST_DIR}/./fingerprints
+    duration: 100
+    edge_ratio_weight: 0.1
+    gradient_hist_weight: 0.1
+    histogram_weight: 0.2
+    metadata: {name: tests.drivers.display.check.shield}
+    method: combined
+    operations: compare
+    phash_weight: 0.35
+    threshold: 0.65
+  module: .plugins.signature_plugin
+  name: signature
+  status: enable
+test:
+  expect: [tests.drivers.display.check.shield]
+  prompt: Display starts
+  timeout: 30
diff --git a/tests/drivers/display/display_check/prj.conf b/tests/drivers/display/display_check/prj.conf
new file mode 100644
index 0000000000000..0fd50113e1c76
--- /dev/null
+++ b/tests/drivers/display/display_check/prj.conf
@@ -0,0 +1,7 @@
+# Copyright 2025 NXP
+# SPDX-License-Identifier: Apache-2.0
+
+CONFIG_HEAP_MEM_POOL_SIZE=16384
+CONFIG_LOG=y
+CONFIG_DISPLAY=y
+CONFIG_ZTEST=y
diff --git a/tests/drivers/display/display_check/src/main.c b/tests/drivers/display/display_check/src/main.c
new file mode 100644
index 0000000000000..972b889ea32e3
--- /dev/null
+++ b/tests/drivers/display/display_check/src/main.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2019 Jan Van Winkel
+ * Copyright (c) 2025 NXP
+ *
+ * Based on ST7789V sample:
+ * Copyright (c) 2019 Marc Reilly
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/logging/log.h>
+LOG_MODULE_REGISTER(test, LOG_LEVEL_INF);
+
+#include <zephyr/device.h>
+#include <zephyr/drivers/display.h>
+#include <zephyr/kernel.h>
+#include <zephyr/ztest.h>
+
+#ifdef CONFIG_ARCH_POSIX
+#include "posix_board_if.h"
+#endif
+
+enum corner {
+	TOP_LEFT,
+	TOP_RIGHT,
+	BOTTOM_RIGHT,
+	BOTTOM_LEFT
+};
+
+typedef void (*fill_buffer)(enum corner corner, uint8_t grey, uint8_t *buf,
+			    size_t buf_size);
+
+
+#ifdef CONFIG_ARCH_POSIX
+static void posix_exit_main(int exit_code)
+{
+	posix_exit(exit_code);
+}
+#endif
+
+static void fill_buffer_argb8888(enum corner corner, uint8_t grey, uint8_t *buf,
+				 size_t buf_size)
+{
+	uint32_t color = 0;
+
+	switch (corner) {
+	case TOP_LEFT:
+		color = 0xFFFF0000u;
+		break;
+	case TOP_RIGHT:
+		color = 0xFF00FF00u;
+		break;
+	case BOTTOM_RIGHT:
+		color = 0xFF0000FFu;
+		break;
+	case BOTTOM_LEFT:
+		color = 0xFF000000u | grey << 16 | grey << 8 | grey;
+		break;
+	}
+
+	for (size_t idx = 0; idx < buf_size; idx += 4) {
+		*((uint32_t *)(buf + idx)) = color;
+	}
+}
+
+static void fill_buffer_rgb888(enum corner corner, uint8_t grey, uint8_t *buf,
+			       size_t buf_size)
+{
+	uint32_t color = 0;
+
+	switch (corner) {
+	case TOP_LEFT:
+		color = 0x00FF0000u;
+		break;
+	case TOP_RIGHT:
+		color = 0x0000FF00u;
+		break;
+	case BOTTOM_RIGHT:
+		color = 0x000000FFu;
+		break;
+	case BOTTOM_LEFT:
+		color = grey << 16 | grey << 8 | grey;
+		break;
+	}
+
+	for (size_t idx = 0; idx < buf_size; idx += 3) {
+		*(buf + idx + 0) = color >> 16;
+		*(buf + idx + 1) = color >> 8;
+		*(buf + idx + 2) = color >> 0;
+	}
+}
+
+static uint16_t get_rgb565_color(enum corner corner, uint8_t grey)
+{
+	uint16_t color = 0;
+	uint16_t grey_5bit;
+
+	switch (corner) {
+	case TOP_LEFT:
+		color = 0xF800u;
+		break;
+	case TOP_RIGHT:
+		color = 0x07E0u;
+		break;
+	case BOTTOM_RIGHT:
+		color = 0x001Fu;
+		break;
+	case BOTTOM_LEFT:
+		grey_5bit = grey & 0x1Fu;
+		/* shift the green an extra bit, it has 6 bits */
+		color = grey_5bit << 11 | grey_5bit << (5 + 1) | grey_5bit;
+		break;
+	}
+	return color;
+}
+
+static void fill_buffer_bgr565(enum corner corner, uint8_t grey, uint8_t *buf,
+			       size_t buf_size)
+{
+	uint16_t color = get_rgb565_color(corner, grey);
+
+	for (size_t idx = 0; idx < buf_size; idx += 2) {
+		*(buf + idx + 0) = (color >> 8) & 0xFFu;
+		*(buf + idx + 1) = (color >> 0) & 0xFFu;
+	}
+}
+
+static void fill_buffer_rgb565(enum corner corner, 
uint8_t grey, uint8_t *buf, + size_t buf_size) +{ + uint16_t color = get_rgb565_color(corner, grey); + + for (size_t idx = 0; idx < buf_size; idx += 2) { + *(uint16_t *)(buf + idx) = color; + } +} + +static void fill_buffer_mono(enum corner corner, uint8_t grey, + uint8_t black, uint8_t white, + uint8_t *buf, size_t buf_size) +{ + uint16_t color; + + switch (corner) { + case BOTTOM_LEFT: + color = (grey & 0x01u) ? white : black; + break; + default: + color = black; + break; + } + + memset(buf, color, buf_size); +} + +static inline void fill_buffer_l_8(enum corner corner, uint8_t grey, uint8_t *buf, size_t buf_size) +{ + for (size_t idx = 0; idx < buf_size; idx += 1) { + *(uint8_t *)(buf + idx) = grey; + } +} + +static inline void fill_buffer_mono01(enum corner corner, uint8_t grey, + uint8_t *buf, size_t buf_size) +{ + fill_buffer_mono(corner, grey, 0x00u, 0xFFu, buf, buf_size); +} + +static inline void fill_buffer_mono10(enum corner corner, uint8_t grey, + uint8_t *buf, size_t buf_size) +{ + fill_buffer_mono(corner, grey, 0xFFu, 0x00u, buf, buf_size); +} + +int test_display(void) +{ + size_t x; + size_t y; + size_t rect_w; + size_t rect_h; + size_t h_step; + size_t scale; + size_t grey_count; + uint8_t bg_color; + uint8_t *buf; + int32_t grey_scale_sleep; + const struct device *display_dev; + struct display_capabilities capabilities; + struct display_buffer_descriptor buf_desc; + size_t buf_size = 0; + fill_buffer fill_buffer_fnc = NULL; + + display_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_display)); + if (!device_is_ready(display_dev)) { + LOG_ERR("Device %s not found. Aborting test.", + display_dev->name); +#ifdef CONFIG_ARCH_POSIX + posix_exit_main(1); +#else + return 0; +#endif + } + + LOG_INF("Display test for %s", display_dev->name); + display_get_capabilities(display_dev, &capabilities); + + if (capabilities.screen_info & SCREEN_INFO_MONO_VTILED) { + rect_w = 16; + rect_h = 8; + } else { + rect_w = 2; + rect_h = 1; + } + + if ((capabilities.x_resolution < 3 * rect_w) || + (capabilities.y_resolution < 3 * rect_h) || + (capabilities.x_resolution < 8 * rect_h)) { + rect_w = capabilities.x_resolution * 40 / 100; + rect_h = capabilities.y_resolution * 40 / 100; + h_step = capabilities.y_resolution * 20 / 100; + scale = 1; + } else { + h_step = rect_h; + scale = (capabilities.x_resolution / 8) / rect_h; + } + + rect_w *= scale; + rect_h *= scale; + + if (capabilities.screen_info & SCREEN_INFO_EPD) { + grey_scale_sleep = 10000; + } else { + grey_scale_sleep = 100; + } + + if (capabilities.screen_info & SCREEN_INFO_X_ALIGNMENT_WIDTH) { + rect_w = capabilities.x_resolution; + } + + buf_size = rect_w * rect_h; + + if (buf_size < (capabilities.x_resolution * h_step)) { + buf_size = capabilities.x_resolution * h_step; + } + + switch (capabilities.current_pixel_format) { + case PIXEL_FORMAT_ARGB_8888: + bg_color = 0x00u; + fill_buffer_fnc = fill_buffer_argb8888; + buf_size *= 4; + break; + case PIXEL_FORMAT_RGB_888: + bg_color = 0xFFu; + fill_buffer_fnc = fill_buffer_rgb888; + buf_size *= 3; + break; + case PIXEL_FORMAT_RGB_565: + bg_color = 0xFFu; + fill_buffer_fnc = fill_buffer_rgb565; + buf_size *= 2; + break; + case PIXEL_FORMAT_BGR_565: + bg_color = 0xFFu; + fill_buffer_fnc = fill_buffer_bgr565; + buf_size *= 2; + break; + case PIXEL_FORMAT_L_8: + bg_color = 0xFFu; + fill_buffer_fnc = fill_buffer_l_8; + break; + case PIXEL_FORMAT_MONO01: + bg_color = 0xFFu; + fill_buffer_fnc = fill_buffer_mono01; + buf_size = DIV_ROUND_UP(DIV_ROUND_UP( + buf_size, NUM_BITS(uint8_t)), sizeof(uint8_t)); + break; 
+ case PIXEL_FORMAT_MONO10: + bg_color = 0x00u; + fill_buffer_fnc = fill_buffer_mono10; + buf_size = DIV_ROUND_UP(DIV_ROUND_UP( + buf_size, NUM_BITS(uint8_t)), sizeof(uint8_t)); + break; + default: + LOG_ERR("Unsupported pixel format. Aborting test."); +#ifdef CONFIG_ARCH_POSIX + posix_exit_main(1); +#else + return 0; +#endif + } + + buf = k_malloc(buf_size); + + if (buf == NULL) { + LOG_ERR("Could not allocate memory. Aborting test."); +#ifdef CONFIG_ARCH_POSIX + posix_exit_main(1); +#else + return 0; +#endif + } + + (void)memset(buf, bg_color, buf_size); + + buf_desc.buf_size = buf_size; + buf_desc.pitch = capabilities.x_resolution; + buf_desc.width = capabilities.x_resolution; + buf_desc.height = h_step; + + /* + * The following writes will only render parts of the image, + * so turn this option on. + * This allows double-buffered displays to hold the pixels + * back until the image is complete. + */ + buf_desc.frame_incomplete = true; + + for (int idx = 0; idx < capabilities.y_resolution; idx += h_step) { + /* + * Tweaking the height value not to draw outside of the display. + * It is required when using a monochrome display whose vertical + * resolution can not be divided by 8. + */ + if ((capabilities.y_resolution - idx) < h_step) { + buf_desc.height = (capabilities.y_resolution - idx); + } + display_write(display_dev, 0, idx, &buf_desc, buf); + } + + buf_desc.pitch = rect_w; + buf_desc.width = rect_w; + buf_desc.height = rect_h; + + fill_buffer_fnc(TOP_LEFT, 0, buf, buf_size); + x = 0; + y = 0; + display_write(display_dev, x, y, &buf_desc, buf); + + fill_buffer_fnc(TOP_RIGHT, 0, buf, buf_size); + x = capabilities.x_resolution - rect_w; + y = 0; + display_write(display_dev, x, y, &buf_desc, buf); + + /* + * This is the last write of the frame, so turn this off. + * Double-buffered displays will now present the new image + * to the user. 
+ */ + buf_desc.frame_incomplete = false; + + fill_buffer_fnc(BOTTOM_RIGHT, 0, buf, buf_size); + x = capabilities.x_resolution - rect_w; + y = capabilities.y_resolution - rect_h; + display_write(display_dev, x, y, &buf_desc, buf); + + display_blanking_off(display_dev); + + grey_count = 50; + x = 0; + y = capabilities.y_resolution - rect_h; + + LOG_INF("Display starts"); + while (1) { + fill_buffer_fnc(BOTTOM_LEFT, grey_count, buf, buf_size); + display_write(display_dev, x, y, &buf_desc, buf); + k_msleep(grey_scale_sleep); + } + +#ifdef CONFIG_ARCH_POSIX + posix_exit_main(0); +#endif + return 0; +} + + +ZTEST(display_test, test_display_by_capture) +{ + test_display(); +} + + +ZTEST_SUITE(display_test, NULL, NULL, NULL, NULL, NULL); diff --git a/tests/drivers/display/display_check/testcase.yaml b/tests/drivers/display/display_check/testcase.yaml new file mode 100644 index 0000000000000..f5b0010c4fc57 --- /dev/null +++ b/tests/drivers/display/display_check/testcase.yaml @@ -0,0 +1,83 @@ +common: + tags: + - drivers + - display + harness: display_capture + harness_config: + pytest_dut_scope: session + display_capture_config: "display_config.yaml" +tests: + tests.drivers.display.check.g1120b0mipi: + platform_allow: + - mimxrt595_evk/mimxrt595s/cm33 + - mimxrt700_evk/mimxrt798s/cm33_cpu0 + extra_args: SHIELD=g1120b0mipi + extra_configs: + - CONFIG_PM=y + - CONFIG_PM_DEVICE=y + - CONFIG_IDLE_STACK_SIZE=400 + - CONFIG_TEST=y + harness_config: + pytest_dut_scope: session + fixture: fixture_display_g1120b0mipi + display_capture_config: "display_config.yaml" + tests.drivers.display.check.st_b_lcd40_dsi1_mb1166_a09: + filter: dt_compat_enabled("frida,nt35510") + platform_allow: stm32h747i_disco/stm32h747xx/m7 + extra_args: SHIELD=st_b_lcd40_dsi1_mb1166_a09 + harness_config: + fixture: fixture_display + pytest_dut_scope: session + display_capture_config: "display_config.yaml" + tests.drivers.display.check.rk043fn02h_ct: + platform_allow: + - mimxrt1064_evk + - mimxrt1060_evk@A/mimxrt1062/qspi + - mimxrt1060_evk@B/mimxrt1062/qspi + - mimxrt1060_evk@C/mimxrt1062/qspi + - mimxrt1050_evk/mimxrt1052/hyperflash + - mimxrt1040_evk + integration_platforms: + - mimxrt1040_evk + extra_args: SHIELD=rk043fn02h_ct + harness_config: + fixture: fixture_display + pytest_dut_scope: session + display_capture_config: "display_config.yaml" + tests.drivers.display.check.shield: + # This test case is intended to verify support for shields on boards + # known to support them. It is not intended to cover all combinations + # of boards and shields, but rather serve as a method to test each + # display shield within Zephyr. + # Boards with built-in displays are also covered by this testcase. 
+ filter: dt_chosen_enabled("zephyr,display") + harness_config: + fixture: fixture_display + pytest_dut_scope: session + display_capture_config: "display_config.yaml" + extra_args: + - platform:lpcxpresso55s69/lpc55s69/cpu0:SHIELD=adafruit_2_8_tft_touch_v2 + - platform:nrf52840dk/nrf52840:SHIELD=ssd1306_128x32 + - platform:frdm_k64f/mk64f12:SHIELD=ssd1306_128x64 + - platform:mimxrt685_evk/mimxrt685/cm33:SHIELD=waveshare_epaper_gdeh0213b1 + - platform:nucleo_l433rc_p/stm32l433xx:SHIELD=waveshare_epaper_gdew042t2 + - platform:nrf52dk/nrf52832:SHIELD=st7789v_tl019fqv01 + - platform:mimxrt1010_evk/mimxrt1011:SHIELD=st7789v_waveshare_240x240 + - platform:frdm_k22f/mk22f51212:SHIELD=ls013b7dh03 + - platform:nrf52833dk/nrf52833:SHIELD=st7735r_ada_160x128 + - platform:mimxrt1170_evk/mimxrt1176/cm7:SHIELD=rk055hdmipi4m + - platform:da1469x_dk_pro/da14699:DTC_OVERLAY_FILE=da1469x_dk_pro_mipi_dbi.overlay + - platform:nrf52840dk/nrf52840:SHIELD=max7219_8x8 + - platform:stm32h747i_disco/stm32h747xx/m7:SHIELD=st_b_lcd40_dsi1_mb1166 + - platform:mimxrt1064_evk/mimxrt1064:SHIELD=rk043fn66hs_ctg + - platform:mimxrt1060_evk/mimxrt1062:SHIELD=rk043fn66hs_ctg + - platform:mimxrt1050_evk/mimxrt1052:SHIELD=rk043fn66hs_ctg + - platform:mimxrt1040_evk/mimxrt1042:SHIELD=rk043fn66hs_ctg + - platform:frdm_mcxn947/mcxn947/cpu0:SHIELD=lcd_par_s035_8080 + - platform:mcx_n9xx_evk/mcxn947/cpu0:SHIELD=lcd_par_s035_8080 + - platform:frdm_mcxn236/mcxn236:SHIELD=lcd_par_s035_8080 + - platform:frdm_mcxa156/mcxa156:SHIELD=lcd_par_s035_8080 + - platform:frdm_rw612:SHIELD=lcd_par_s035_spi + - platform:ek_ra8d1:SHIELD=rtkmipilcdb00000be + - platform:ek_ra8d1:SHIELD=rtk7eka6m3b00001bu + - platform:nucleo_g071rb/stm32g071xx:SHIELD=x_nucleo_gfx01m2
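

A closing note on scoring: for each frame pair, the signature plugin combines
the per-component similarities as a weighted average and compares the result
against the configured threshold. A worked example using the default weights
from the shipped configs (the per-component scores below are made up):

```python
# Default component weights from the shipped configs
weights = {"phash": 0.35, "dhash": 0.25, "histogram": 0.2,
           "edge_ratio": 0.1, "gradient_hist": 0.1}

# Hypothetical per-component similarities for one frame pair
scores = {"phash": 0.9, "dhash": 0.8, "histogram": 0.7,
          "edge_ratio": 0.95, "gradient_hist": 0.6}

final = sum(weights[k] * scores[k] for k in weights) / sum(weights.values())
print(f"{final:.3f}")  # 0.810 -- above the 0.65 threshold, so this frame matches
```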