# ==================== EasyOCR version ====================

import re  
import cv2  
import easyocr  
import os  
from glob import glob  
import numpy as np  
from PIL import Image, ImageDraw, ImageFont  
# import torch  
  
## ==== Configuration ====
# EasyOCR settings and file paths
OCR_LANGS = ['ko', 'en']                # languages to recognize
INPUT_DIR = "idc_samples"               # input image folder
OUTPUT_DIR = "idc_results"              # folder for masked results
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Regexes covering the 3 resident-registration-number (RRN) layouts
rrn_combined_regex = re.compile(r"\b\d{6}\s*-\s*\d{7}\b")  # 6 digits - 7 digits (hyphenated)
rrn_nohyphen_regex = re.compile(r"\b\d{13}\b")       # 13 digits run together
rrn_front_regex = re.compile(r"\b\d{6}\b")           # front 6 digits alone
rrn_back_regex = re.compile(r"\b\d{7}\b")            # back 7 digits alone

## ==== Tunable settings ====
# Thresholds for joining a split front/back RRN pair into one detection
MAX_X_DISTANCE = 50
MAX_Y_DIFFERENCE = 10

# Masking method and per-method options
MASK_METHOD = "black"       # "black", "mosaic", "blur", "replace_text"
REPLACE_TEXT = "[ REDACTED ]"  # text shown in replace_text mode (avoid Korean: the fallback font has no Hangul glyphs)
MOSAIC_SCALE = 0.05        # downscale ratio (0.01-0.03 recommended; smaller = stronger mosaic)
BLUR_KERNEL = (51, 51)     # Gaussian blur kernel size (must be odd; larger = stronger)
OCR_LOW_TEXT = 0.4         # EasyOCR low_text threshold: how low-confidence text to include (default 0.4)
                           # lower it for low-quality/blurry documents, raise it for noisy images
# -----------------------
  
  
# ==== ๋งˆ์Šคํ‚น ์œ ํ‹ธ๋ฆฌํ‹ฐ ํ•จ์ˆ˜ ====  
def mask_bbox(img, bbox, method=MASK_METHOD):
    """
    Mask the text region given by an EasyOCR 4-point bbox, in place.

    Args:
        img: BGR image (numpy array); modified in place.
        bbox: 4-point polygon as returned by EasyOCR.
        method: masking style — "black", "mosaic", "blur", or "replace_text".

    Returns:
        The (modified) image, for convenient reassignment by callers.
    """
    pts = np.array(bbox, dtype=np.int32)

    if method == "black":
        # Fill the polygon with solid black.
        cv2.fillPoly(img, [pts], (0, 0, 0))

    elif method == "mosaic":
        # Pixelate: shrink the ROI, then blow it back up with nearest-neighbor.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, [pts], 255)
        x, y, w, h = cv2.boundingRect(pts)  # axis-aligned bounds of the polygon
        roi = img[y:y+h, x:x+w]
        mask_roi = mask[y:y+h, x:x+w]
        if roi.size != 0:
            small_w = max(1, int(w * MOSAIC_SCALE))
            small_h = max(1, int(h * MOSAIC_SCALE))
            small = cv2.resize(roi, (small_w, small_h))
            mosaic = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
            # roi is a view into img, so this writes through to the image.
            roi[mask_roi == 255] = mosaic[mask_roi == 255]

    elif method == "blur":
        # Gaussian-blur only the pixels inside the polygon.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, [pts], 255)
        x, y, w, h = cv2.boundingRect(pts)
        roi = img[y:y+h, x:x+w]
        mask_roi = mask[y:y+h, x:x+w]
        if roi.size != 0:
            blur = cv2.GaussianBlur(roi, BLUR_KERNEL, 0)
            roi[mask_roi == 255] = blur[mask_roi == 255]

    elif method == "replace_text":
        # bbox size and center
        x, y, w, h = cv2.boundingRect(pts)
        center_x = x + w // 2
        center_y = y + h // 2

        # White-out the original text first.
        cv2.fillPoly(img, [pts], (255, 255, 255))

        # Rotation angle: follow the longest edge of the quadrilateral.
        max_len = 0
        angle = 0
        for i in range(4):
            dx = pts[(i + 1) % 4][0] - pts[i][0]
            dy = pts[(i + 1) % 4][1] - pts[i][1]
            length = np.hypot(dx, dy)
            if length > max_len:
                max_len = length
                angle = -np.degrees(np.arctan2(dy, dx))  # negated: PIL rotates CCW

        # Switch to PIL so we can draw TrueType text with transparency.
        pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(pil_img)

        diagonal = np.hypot(w, h)
        font_size = min(max(int(diagonal * 0.1), 12), h)

        # Bold font if available; truetype raises OSError when the font file
        # is missing. (This try/except had been collapsed into a comment on
        # one line, leaving the `try:` without an `except` — a SyntaxError.)
        try:
            font = ImageFont.truetype("malgunbd.ttf", font_size)  # Malgun Gothic Bold
        except OSError:
            font = ImageFont.load_default()

        # Measure the text so it can be centered on the bbox.
        bbox_text = draw.textbbox((0, 0), REPLACE_TEXT, font=font)
        text_w = bbox_text[2] - bbox_text[0]
        text_h = bbox_text[3] - bbox_text[1]

        # Position text at the bbox center, nudged up a few pixels.
        offset = 5  # pixels to raise the text
        text_x = center_x - text_w / 2
        text_y = center_y - text_h / 2 - offset

        # Draw on a transparent layer, rotate it around the bbox center,
        # then composite back onto the image.
        text_layer = Image.new("RGBA", pil_img.size, (255, 255, 255, 0))
        text_draw = ImageDraw.Draw(text_layer)
        text_draw.text((text_x, text_y), REPLACE_TEXT, font=font, fill=(255, 0, 0, 255))
        rotated_layer = text_layer.rotate(angle, center=(center_x, center_y), resample=Image.BICUBIC)
        pil_img = Image.alpha_composite(pil_img.convert("RGBA"), rotated_layer)
        img[:] = cv2.cvtColor(np.array(pil_img.convert("RGB")), cv2.COLOR_RGB2BGR)

    return img
  
  
def combine_bboxes(bbox1, bbox2):
    """Return the axis-aligned 4-point box that encloses both input boxes."""
    all_x = [p[0] for p in bbox1] + [p[0] for p in bbox2]
    all_y = [p[1] for p in bbox1] + [p[1] for p in bbox2]

    left, right = min(all_x), max(all_x)
    top, bottom = min(all_y), max(all_y)

    # Corners in clockwise order starting from top-left.
    return [[left, top], [right, top], [right, bottom], [left, bottom]]
  
  
# ==== ๋ฉ”์ธ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ ====def process_image(image_path, reader):  
    image = cv2.imread(image_path)  
    if image is None or image.size == 0:  
        print(f"[ERROR] ์ด๋ฏธ์ง€๋ฅผ ์ฝ์„ ์ˆ˜ ์—†์Œ: {image_path}")  
        return 0  
  
    try:  
        ocr_results = reader.readtext(image, low_text=OCR_LOW_TEXT)  
    except Exception as e:  
        print(f"[ERROR] EasyOCR ์‹คํ–‰ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {e}")  
        return 0  
  
    found = 0  
    masked_bboxes_coords = []  
  
    # 1) ํ†ตํ•ฉ ํŒจํ„ด ์ฒ˜๋ฆฌ  
    for bbox, text, conf in ocr_results:  
        m_combined = rrn_combined_regex.search(text)  
        if m_combined:  
            image = mask_bbox(image, bbox, method=MASK_METHOD)  
            found += 1  
            masked_bboxes_coords.append(bbox)  
            print(f"[MASKED] Combined RRN='{m_combined.group()}' | conf={conf:.2f}")  
            continue  
  
        m_nohyphen = rrn_nohyphen_regex.search(text)  
        if m_nohyphen:  
            image = mask_bbox(image, bbox, method=MASK_METHOD)  
            found += 1  
            masked_bboxes_coords.append(bbox)  
            print(f"[MASKED] NoHyphen RRN='{m_nohyphen.group()}' | conf={conf:.2f}")  
            continue  
  
    # 2) ๋ถ„๋ฆฌ ํŒจํ„ด ์ฒ˜๋ฆฌ  
    front_parts, back_parts = [], []  
  
    for bbox, text, conf in ocr_results:  
        clean_text = re.sub(r'[- ]', '', text)  
        if rrn_front_regex.search(clean_text) and len(clean_text) == 6:  
            front_parts.append({'bbox': bbox, 'text': text, 'conf': conf})  
        elif rrn_back_regex.search(clean_text) and len(clean_text) == 7:  
            back_parts.append({'bbox': bbox, 'text': text, 'conf': conf})  
  
    masked_fronts = set()  
    masked_backs = set()  
  
    for i, front in enumerate(front_parts):  
        if i in masked_fronts:  
            continue  
  
        front_xs = [p[0] for p in front['bbox']]  
        front_ys = [p[1] for p in front['bbox']]  
        front_x_max = max(front_xs)  
        front_y_center = (min(front_ys) + max(front_ys)) / 2  
  
        for j, back in enumerate(back_parts):  
            if j in masked_backs:  
                continue  
  
            back_xs = [p[0] for p in back['bbox']]  
            back_ys = [p[1] for p in back['bbox']]  
            back_x_min = min(back_xs)  
            back_y_center = (min(back_ys) + max(back_ys)) / 2  
  
            x_distance = back_x_min - front_x_max  
            y_diff = abs(front_y_center - back_y_center)  
  
            if 0 <= x_distance <= MAX_X_DISTANCE and y_diff <= MAX_Y_DIFFERENCE:  
                combined_bbox = combine_bboxes(front['bbox'], back['bbox'])  
                image = mask_bbox(image, combined_bbox, method=MASK_METHOD)  
                found += 1  
                # ์ค‘๋ณต ๋ฐฉ์ง€  
                masked_fronts.add(i)  
                masked_backs.add(j)  
  
                combined_text = f"{front['text']}-{back['text']}"  
                print(f"[MASKED] Combined (Split) RRN='{combined_text}' | dist={x_distance:.1f} | y_diff={y_diff:.1f}")  
                break  
  
    # ๊ฒฐ๊ณผ ์ €์žฅ  
    fname = os.path.basename(image_path)  
    out_path = os.path.join(OUTPUT_DIR, f"EasyOCR_masked_{MASK_METHOD}_{fname}")  
    cv2.imwrite(out_path, image)  
    print(f"์™„๋ฃŒ: {out_path} (์ด {found}๊ฐœ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ)")  
    return found  
  
  
def main():
    """Batch-mask RRNs in every image under INPUT_DIR and report totals."""
    reader = easyocr.Reader(OCR_LANGS, gpu=True)

    # Gather every supported image file from the input folder.
    glob_patterns = ("*.jpg", "*.jpeg", "*.png")
    img_files = [path
                 for pattern in glob_patterns
                 for path in glob(os.path.join(INPUT_DIR, pattern))]

    if not img_files:
        print(f"[INFO] ์ž…๋ ฅ ํด๋”์— ์ด๋ฏธ์ง€๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค: {INPUT_DIR}")
        return

    total_masked = 0
    for img_path in img_files:
        print(f"\n์ฒ˜๋ฆฌ ์ค‘: {img_path}")
        total_masked += process_image(img_path, reader)

    print(f"\n=== ์ „์ฒด ์™„๋ฃŒ ===\n์ด {len(img_files)}์žฅ ์ฒ˜๋ฆฌ, {total_masked}๊ฐœ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ")


if __name__ == "__main__":
    main()

# ==================== PaddleOCR version ====================

import os
import re
import cv2
import numpy as np
from paddleocr import PaddleOCR
from PIL import Image, ImageDraw, ImageFont
# import paddle
 
# GPU ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰ ํ™•์ธ
# mem_alloc = paddle.device.cuda.memory_allocated()
# mem_reserved = paddle.device.cuda.max_memory_reserved()
# print(f"์‹œ์ž‘ ์ „ ๋ฉ”๋ชจ๋ฆฌ: {mem_alloc/1024**2:.2f} MB")
# print(f"์ตœ๋Œ€ ์˜ˆ์•ฝ๋œ ๋ฉ”๋ชจ๋ฆฌ: {mem_reserved / 1024**2:.2f} MB")
 
# ----- OCR initialization -----
# PaddleOCR reader: Korean model on GPU 0, with document-orientation
# classification, unwarping, and text-line orientation all disabled.
ocr = PaddleOCR(
    lang='korean',
    use_doc_orientation_classify=False,
    use_doc_unwarping=False,
    use_textline_orientation=False,
    device='gpu:0',
)

# ----- Resident registration number (RRN) regexes -----
patterns = [
    re.compile(r"\b\d{6}\s*-\s*\d{7}\b"),  # 6 digits - 7 digits (hyphenated)
    re.compile(r"\b\d{13}\b")              # 13 digits run together
]
 
# ----- ๋งˆ์Šคํ‚น ํ•จ์ˆ˜ (dt_polys ์ขŒํ‘œ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉ) -----
def mask_black(img, box):
    """Fill the detected text polygon with solid black, in place."""
    polygon = np.array(box, dtype=np.int32)
    cv2.fillPoly(img, [polygon], color=(0, 0, 0))
    return img
 
def mask_blur(img, box):
    """Gaussian-blur the pixels inside the detected text polygon, in place."""
    polygon = np.array(box, dtype=np.int32)

    # Rasterize the polygon so only its interior pixels get replaced.
    poly_mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv2.fillPoly(poly_mask, [polygon], 255)

    x, y, w, h = cv2.boundingRect(polygon)
    region = img[y:y+h, x:x+w]
    region_mask = poly_mask[y:y+h, x:x+w]

    if region.size == 0:
        return img

    # Blur strength: kernel must be odd; larger kernel = stronger blur.
    blurred = cv2.GaussianBlur(region, (51, 51), 30)
    region[region_mask == 255] = blurred[region_mask == 255]
    img[y:y+h, x:x+w] = region
    return img
 
def mask_mosaic(img, box, ratio=0.05):
    """Pixelate the detected text polygon, in place.

    ratio controls mosaic strength: lower values = coarser blocks.
    """
    polygon = np.array(box, dtype=np.int32)

    # Limit the effect to the polygon interior via a raster mask.
    poly_mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv2.fillPoly(poly_mask, [polygon], 255)

    x, y, w, h = cv2.boundingRect(polygon)
    region = img[y:y+h, x:x+w]
    region_mask = poly_mask[y:y+h, x:x+w]

    if region.size == 0:
        return img

    # Shrink, then enlarge with nearest-neighbor to create the block effect.
    shrunk_size = (max(1, int(w * ratio)), max(1, int(h * ratio)))
    shrunk = cv2.resize(region, shrunk_size, interpolation=cv2.INTER_LINEAR)
    blocks = cv2.resize(shrunk, (w, h), interpolation=cv2.INTER_NEAREST)
    region[region_mask == 255] = blocks[region_mask == 255]
    img[y:y+h, x:x+w] = region
    return img
 
def mask_replace_text(img, box, text="[ REDACTED ]"):
    """
    White-out the detected polygon and draw rotated red replacement text.

    Args:
        img: BGR image (numpy array); modified in place.
        box: 4-point text polygon from PaddleOCR (dt_polys entry).
        text: replacement string to draw over the masked area.

    Returns:
        The modified image.
    """
    pts = np.array(box, dtype=np.int32)
    x, y, w, h = cv2.boundingRect(pts)
    center_x = x + w // 2
    center_y = y + h // 2

    # Cover the original text with a white fill.
    cv2.fillPoly(img, [pts], (255, 255, 255))

    # Rotation angle: follow the longest edge of the quadrilateral.
    max_len = 0
    angle = 0
    for i in range(4):
        dx = pts[(i + 1) % 4][0] - pts[i][0]
        dy = pts[(i + 1) % 4][1] - pts[i][1]
        length = np.hypot(dx, dy)
        if length > max_len:
            max_len = length
            angle = -np.degrees(np.arctan2(dy, dx))  # negated: PIL rotates CCW

    # Draw with PIL so a TrueType font (with Hangul support) can be used.
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_img)

    diagonal = np.hypot(w, h)
    font_size = min(max(int(diagonal * 0.1), 12), h)
    try:
        font = ImageFont.truetype("malgunbd.ttf", font_size)  # Malgun Gothic Bold
    except OSError:
        # Narrowed from a bare `except:` so unrelated errors aren't swallowed;
        # truetype raises OSError when the font file cannot be found.
        font = ImageFont.load_default()

    # Center the replacement text on the bbox, nudged up a few pixels.
    bbox_text = draw.textbbox((0, 0), text, font=font)
    text_w = bbox_text[2] - bbox_text[0]
    text_h = bbox_text[3] - bbox_text[1]
    offset = 5  # pixels to raise the text
    text_x = center_x - text_w / 2
    text_y = center_y - text_h / 2 - offset

    # Draw on a transparent layer, rotate about the bbox center, composite.
    text_layer = Image.new("RGBA", pil_img.size, (255, 255, 255, 0))
    text_draw = ImageDraw.Draw(text_layer)
    text_draw.text((text_x, text_y), text, font=font, fill=(255, 0, 0, 255))
    rotated_layer = text_layer.rotate(angle, center=(center_x, center_y), resample=Image.BICUBIC)
    pil_img = Image.alpha_composite(pil_img.convert("RGBA"), rotated_layer)
    img[:] = cv2.cvtColor(np.array(pil_img.convert("RGB")), cv2.COLOR_RGB2BGR)

    return img
 
# Dispatch table: masking-method name -> masking function.
masking_methods = {
    "black": mask_black,
    "blur": mask_blur,
    "mosaic": mask_mosaic,
    "replace_text": mask_replace_text
}
 
# ----- ์ด๋ฏธ์ง€ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ -----
# ----- Image processing function -----
def process_image(img_path, save_path, masking="black"):
    """
    OCR one image with PaddleOCR, mask every detected RRN, and save it.

    Args:
        img_path: path of the input image.
        save_path: where to write the masked image.
        masking: key into masking_methods ("black", "blur", "mosaic",
            "replace_text").
    """
    img = cv2.imread(img_path)
    if img is None:
        print(f"์ด๋ฏธ์ง€๋ฅผ ์ฝ์„ ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค: {img_path}")
        return

    # Resolve the masking function once, outside the loop. Previously an
    # unknown method name silently left every RRN unmasked — warn instead.
    mask_fn = masking_methods.get(masking)
    if mask_fn is None:
        print(f"[WARN] ์•Œ ์ˆ˜ ์—†๋Š” ๋งˆ์Šคํ‚น ๋ฐฉ์‹: {masking}")

    result = ocr.predict(img_path)[0]  # first (and only) page result
    rec_texts = result['rec_texts']
    # print(rec_texts)  # uncomment to inspect the raw OCR text
    dt_polys = result['dt_polys']

    # Mask every text token matching one of the RRN patterns.
    for text, box in zip(rec_texts, dt_polys):
        if mask_fn is not None and any(p.search(text) for p in patterns):
            img = mask_fn(img, box)

    cv2.imwrite(save_path, img)
    print(f"์ €์žฅ ์™„๋ฃŒ: {save_path}")
 
# ----- ๋ฉ”์ธ -----
# ----- Main -----
if __name__ == "__main__":
    input_dir = "idc_samples"
    output_dir = "idc_results"
    os.makedirs(output_dir, exist_ok=True)

    # Choose one of: "black", "blur", "mosaic", "replace_text"
    masking_type = "black"

    image_extensions = (".jpg", ".jpeg", ".png")
    for file in os.listdir(input_dir):
        if not file.lower().endswith(image_extensions):
            continue
        input_path = os.path.join(input_dir, file)
        output_path = os.path.join(output_dir, f"PaddleOCR_{masking_type}_{file}")
        process_image(input_path, output_path, masking=masking_type)
 
# gpu ๋ฉ”๋ชจ๋ฆฌ ํ™•์ธ
# mem_alloc = paddle.device.cuda.memory_allocated()
# mem_reserved = paddle.device.cuda.max_memory_reserved()
# print(f"OCR ์‹คํ–‰ ํ›„ ๋ฉ”๋ชจ๋ฆฌ: {paddle.device.cuda.memory_allocated()/1024**2:.2f} MB")
# print(f"์ตœ๋Œ€ ์˜ˆ์•ฝ๋œ ๋ฉ”๋ชจ๋ฆฌ: {mem_reserved / 1024**2:.2f} MB")