import sys
import torch

# Print system and PyTorch information
print(f"Python Version: {sys.version.split()[0]}")
print(f"PyTorch Version: {torch.__version__}")
print(f"GPU detected: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"Available device: {torch.cuda.get_device_name(0)}")
else:
    print("No GPU is currently available. Check your driver or ROCm setup.")

# Run an actual tensor operation on the GPU
if torch.cuda.is_available():
    x = torch.randn(1, 3).to("cuda")
    print("GPU tensor operation test complete:", x)
Run
(.venv) bluesanta@bluesanta-AI-Series:~/Application/Llama$ python gpu_check.py
Python Version: 3.12.3
PyTorch Version: 2.9.1+rocm7.10.0
GPU detected: True
Available device: AMD Radeon 890M Graphics
/home/bluesanta/Application/Llama/gpu_check.py:17: UserWarning: expandable_segments not supported on this platform (Triggered internally at /__w/rockrel/rockrel/external-builds/pytorch/pytorch/c10/hip/HIPAllocatorConfig.h:36.)
x = torch.randn(1, 3).to("cuda")
GPU tensor operation test complete: tensor([[-0.5998, -1.3418, -0.5339]], device='cuda:0')
[W121 10:44:08.608317046 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
Adding the PYTORCH_ALLOC_CONF environment variable (to address the deprecation warning above)
Edit ~/.bashrc
(.venv) bluesanta@bluesanta-AI-Series:~/Application/Llama$ vi ~/.bashrc
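The session does not show the exact line added to ~/.bashrc; as a minimal example, assuming the goal is simply to move to the new variable name flagged by the warning (the specific allocator option shown here is illustrative only):

export PYTORCH_ALLOC_CONF=expandable_segments:False

Reload the configuration with source ~/.bashrc (or open a new terminal) so the variable takes effect before running PyTorch again.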
(.venv) bluesanta@bluesanta-AI-Series:~/Application/stable_diffusion$ cd ComfyUI/
(.venv) bluesanta@bluesanta-AI-Series:~/Application/stable_diffusion/ComfyUI$ pip install -r requirements.txt
Run ComfyUI
(.venv) bluesanta@bluesanta-AI-Series:~/Application/stable_diffusion/ComfyUI$ python main.py
Checkpoint files will always be loaded safely.
Total VRAM 14844 MB, total RAM 29688 MB
pytorch version: 2.9.1+rocm7.10.0
Set: torch.backends.cudnn.enabled = False for better AMD performance.
AMD arch: gfx1150
ROCm version: (7, 2)
Set vram state to: NORMAL_VRAM
Device: cuda:0 AMD Radeon 890M Graphics : native
Using async weight offloading with 2 streams
Enabled pinned memory 28203.0
Using sub quadratic optimization for attention, if you have memory or speed issues try using: --use-split-cross-attention
Python version: 3.12.3 (main, Nov 6 2025, 13:44:16) [GCC 13.3.0]
ComfyUI version: 0.7.0
****** User settings have been changed to be stored on the server instead of browser storage. ******
****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******
ComfyUI frontend version: 1.35.9
[Prompt Server] web root: /home/bluesanta/Application/stable_diffusion/.venv/lib/python3.12/site-packages/comfyui_frontend_package/static
Total VRAM 14844 MB, total RAM 29688 MB
pytorch version: 2.9.1+rocm7.10.0
Set: torch.backends.cudnn.enabled = False for better AMD performance.
AMD arch: gfx1150
ROCm version: (7, 2)
Set vram state to: NORMAL_VRAM
Device: cuda:0 AMD Radeon 890M Graphics : native
Using async weight offloading with 2 streams
Enabled pinned memory 28203.0
Import times for custom nodes:
0.0 seconds: /home/bluesanta/Application/stable_diffusion/ComfyUI/custom_nodes/websocket_image_save.py
Context impl SQLiteImpl.
Will assume non-transactional DDL.
No target revision found.
Starting server
To see the GUI go to: http://127.0.0.1:8188
ESP32-C3 LED blink test (MicroPython)
from machine import Pin
import time
# Define the GPIO pin connected to the built-in LED
# (This may vary depending on your specific ESP32-C3 board)
led_pin = Pin(8, Pin.OUT) # Example: Assuming GPIO8 for the LED
while True:
    led_pin.on()      # Turn the LED on
    print("LED ON")
    time.sleep(1)     # Wait for 1 second
    led_pin.off()     # Turn the LED off
    print("LED OFF")
    time.sleep(1)     # Wait for 1 second
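Note that on some ESP32-C3 boards the onboard LED is wired active-low, so on() and off() appear inverted. A minimal variant, assuming such a board (the pin number and polarity are board-dependent assumptions):

from machine import Pin
import time

# Assumed active-low onboard LED on GPIO8 (check your board's schematic)
led_pin = Pin(8, Pin.OUT)

while True:
    led_pin.value(0)   # active-low: driving the pin low turns the LED on
    print("LED ON")
    time.sleep(1)
    led_pin.value(1)   # driving the pin high turns the LED off
    print("LED OFF")
    time.sleep(1)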