Python: using OpenAI's CLIP model for image–text semantic matching, and visualizing the CLIP similarity matrix with a rainbow colormap

Published: 2025-06-22

Author: CSDN @ _养乐多_

This article shows how to use OpenAI's CLIP model for semantic matching between images and text. The Python code loads several images and a set of category labels, then uses cosine similarity to decide which text label best matches each image.

The result looks like the figure below.

[Figure: each image shown alongside rainbow-colored cells encoding its similarity to every text label]



1. What is CLIP?

CLIP (Contrastive Language–Image Pre-training) is a model from OpenAI that maps images and text into a single shared vector space, which gives it cross-modal understanding: semantically related images and texts end up close together. Its strength is that it needs no task-specific training, yet it handles a wide range of image recognition and text matching tasks.
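
The core usage fits in a few lines. Here is a minimal zero-shot matching sketch (it assumes the packages installed in the next section and a local file cat.jpg; the three labels are illustrative):

import torch
import clip
from PIL import Image

# Encode one image and a few candidate labels into the shared space,
# then pick the label with the highest cosine similarity.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

labels = ["a cat", "a dog", "a car"]
image = preprocess(Image.open("cat.jpg")).unsqueeze(0).to(device)
text = clip.tokenize(labels).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)  # shape (1, 512) for ViT-B/32
    text_features = model.encode_text(text)     # shape (3, 512)

# After normalization, the dot product is the cosine similarity
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (image_features @ text_features.T).squeeze(0)
print("Best match:", labels[similarity.argmax().item()])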

2. Setup

Prepare a folder containing a few images (e.g. ./images), such as "cat.jpg", "dog.jpg", "car.jpg", "mountain.jpg", and "food.jpg".
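
For example, the folder might look like this:

./images
├── cat.jpg
├── dog.jpg
├── car.jpg
├── mountain.jpg
└── food.jpg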

Install the third-party libraries:

pip install torch torchvision ftfy regex tqdm
pip install git+https://github.com/openai/CLIP.git
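
To check that the installation worked, you can list the pretrained checkpoints that clip.load() accepts ("ViT-B/32", used below, should be among them):

import clip

# Print the names of all pretrained CLIP checkpoints this package can load
print(clip.available_models())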

3. Code

import torch
import clip
from PIL import Image
import os
import matplotlib.pyplot as plt
import numpy as np
from typing import List

def load_images(image_paths: List[str]) -> List[Image.Image]:
    """
    加载并转换图像为RGB格式
    
    参数:
        image_paths: 图像文件路径列表
    
    返回:
        PIL Image对象列表
    """
    return [Image.open(path).convert("RGB") for path in image_paths]

def compute_similarity(model, preprocess, images, texts, device):
    """
    使用CLIP模型计算图像和文本之间的余弦相似度
    
    参数:
        model: CLIP模型实例
        preprocess: 图像预处理函数
        images: PIL Image列表
        texts: 文本标签列表
        device: 设备名称(cpu或cuda)
    
    返回:
        numpy数组,形状为 (图像数, 文本类别数),值为相似度分数
    """
    # 预处理图像,转为tensor并堆叠到一个batch,放到指定设备
    image_tensors = torch.stack([preprocess(img) for img in images]).to(device)
    # 对文本进行tokenize并放到设备
    text_tokens = clip.tokenize(texts).to(device)

    with torch.no_grad():
        # 编码图像和文本特征
        image_features = model.encode_image(image_tensors)
        text_features = model.encode_text(text_tokens)

    # 对特征做归一化,方便计算余弦相似度
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    # 计算余弦相似度矩阵 (图像特征 @ 文本特征转置)
    similarity = image_features @ text_features.T
    return similarity.cpu().numpy()

def visualize_similarity_matrix_with_images(images, image_paths, categories, similarity):
    """
    可视化相似度矩阵,左侧显示对应图片,右侧按类别显示彩虹色相似度色块,色块内标注相似度数值
    
    参数:
        images: PIL Image列表
        image_paths: 图像路径列表(用于显示文件名)
        categories: 文本类别标签列表
        similarity: numpy数组,图像与文本类别的相似度矩阵
    """
    num_images = len(images)
    num_categories = len(categories)
    
    # 计算相似度最小和最大值,用于归一化色带映射
    min_val = similarity.min()
    max_val = similarity.max()
    
    # 创建子图,列数比类别数多1列,用于放图片
    fig, axs = plt.subplots(num_images, num_categories + 1, figsize=(2 * (num_categories + 1), 2 * num_images))
    
    if num_images == 1:
        axs = axs.reshape(1, -1)  # 保证axs是2D数组,方便统一处理
    
    # 去除子图间距,保证格子紧密无缝隙
    plt.subplots_adjust(wspace=0, hspace=0)
    
    # 第一行显示类别标题(只显示文字,不显示轴)
    for j in range(num_categories):
        axs[0, j+1].set_title(categories[j], fontsize=11, pad=6)
        axs[0, j+1].axis('off')
    axs[0, 0].axis('off')  # 左上角空白
    
    # 遍历每张图片和每个类别
    for i in range(num_images):
        # 左侧显示图片,不显示坐标轴
        axs[i, 0].imshow(images[i])
        axs[i, 0].axis('off')
        # 图片文件名作为标题
        axs[i, 0].set_title(os.path.basename(image_paths[i]), fontsize=8, pad=4)
        
        for j in range(num_categories):
            sim_val = similarity[i, j]
            # 将相似度归一化到0-1区间,用于颜色映射
            norm_val = (sim_val - min_val) / (max_val - min_val) if max_val > min_val else 0
            # 使用彩虹色带(rainbow)映射相似度
            color = plt.cm.rainbow(norm_val)
            # 显示彩色方块
            axs[i, j+1].imshow(np.ones((20,20,3)) * color[:3])
            axs[i, j+1].axis('off')
            
            # 根据颜色亮度选择文字颜色,保证对比度,易读性
            brightness = 0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2]
            font_color = 'black' if brightness > 0.6 else 'white'
            # 在色块中心写入相似度数值
            axs[i, j+1].text(10, 10, f"{sim_val:.2f}", ha='center', va='center', fontsize=9, color=font_color)
    
    # 自动紧凑布局,去除多余边距
    plt.tight_layout(pad=0)
    plt.show()


def main():
    # Prefer CUDA if available, otherwise fall back to the CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Load the CLIP model together with its matching image preprocessing function
    model, preprocess = clip.load("ViT-B/32", device=device)

    # Folder containing the images
    image_folder = "./images"
    # Collect the paths of files with common image extensions
    image_paths = [os.path.join(image_folder, f) for f in os.listdir(image_folder)
                   if f.lower().endswith(('.jpg', '.png', '.jpeg'))]
    # Load all of the images
    images = load_images(image_paths)

    # Text categories to match against
    categories = ["cat", "dog", "car", "mountain", "food"]

    # Similarity matrix between the images and the text categories
    similarity = compute_similarity(model, preprocess, images, categories, device)

    # Print the best-matching category and its similarity for each image
    for i, path in enumerate(image_paths):
        best_idx = similarity[i].argmax()
        print(f"Image: {os.path.basename(path)} => Best match: '{categories[best_idx]}' (Similarity: {similarity[i][best_idx]:.4f})")

    # Visualize the similarity matrix
    visualize_similarity_matrix_with_images(images, image_paths, categories, similarity)

if __name__ == "__main__":
    main()
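
Note that the script reports raw cosine similarities, which is also what the rainbow cells encode. If you prefer probabilities over the candidate labels, the official CLIP README computes them as (100.0 * image_features @ text_features.T).softmax(dim=-1). A minimal sketch of the same conversion on the numpy matrix returned by compute_similarity; the helper name is mine:

import numpy as np

def similarity_to_probs(similarity: np.ndarray, scale: float = 100.0) -> np.ndarray:
    """Turn raw cosine similarities into per-image probabilities with a scaled softmax."""
    logits = scale * similarity
    logits = logits - logits.max(axis=1, keepdims=True)  # subtract the row max for numerical stability
    exp = np.exp(logits)
    return exp / exp.sum(axis=1, keepdims=True)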

Thank you all for your interest in and support of my articles!

4. Code 2

This is the same as Part 3; only the visualization differs: the similarity matrix is drawn as a seaborn heatmap.

[Figure: seaborn heatmap of the image–text similarity matrix]

import torch
import clip
from PIL import Image
import os
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List

def load_images(image_paths: List[str]) -> List[Image.Image]:
    return [Image.open(path).convert("RGB") for path in image_paths]

def compute_similarity(model, preprocess, images, texts, device):
    image_tensors = torch.stack([preprocess(img) for img in images]).to(device)
    text_tokens = clip.tokenize(texts).to(device)

    with torch.no_grad():
        image_features = model.encode_image(image_tensors)
        text_features = model.encode_text(text_tokens)

    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    similarity = image_features @ text_features.T
    return similarity.cpu().numpy()

def visualize_similarity_matrix(image_paths, categories, similarity):
    plt.figure(figsize=(12, max(4, len(image_paths) * 0.5)))
    sns.heatmap(similarity, xticklabels=categories, yticklabels=[os.path.basename(p) for p in image_paths],
                cmap="YlGnBu", annot=True, fmt=".2f")
    plt.xlabel("Categories")
    plt.ylabel("Images")
    plt.title("Image-Text Similarity Matrix")
    plt.tight_layout()
    plt.show()

def main():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)

    image_folder = "./images"
    image_paths = [os.path.join(image_folder, f) for f in os.listdir(image_folder)
                   if f.lower().endswith(('.jpg', '.png', '.jpeg'))]
    images = load_images(image_paths)

    categories = ["cat", "dog", "car", "mountain", "food"]

    similarity = compute_similarity(model, preprocess, images, categories, device)

    # Print the best-matching category and its similarity for each image
    for i, path in enumerate(image_paths):
        best_idx = similarity[i].argmax()
        print(f"Image: {os.path.basename(path)} => Best match: '{categories[best_idx]}' (Similarity: {similarity[i][best_idx]:.4f})")

    visualize_similarity_matrix(image_paths, categories, similarity)

if __name__ == "__main__":
    main()
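
On a headless machine where plt.show() cannot open a window, the same heatmap can be written to a file instead. A minimal sketch, assuming the Agg backend is acceptable; the output file name is arbitrary:

import os
import matplotlib
matplotlib.use("Agg")  # non-interactive backend; select it before drawing
import matplotlib.pyplot as plt
import seaborn as sns

def save_similarity_heatmap(image_paths, categories, similarity, out_path="similarity_matrix.png"):
    # Same figure as visualize_similarity_matrix(), saved to disk instead of shown
    plt.figure(figsize=(12, max(4, len(image_paths) * 0.5)))
    sns.heatmap(similarity, xticklabels=categories,
                yticklabels=[os.path.basename(p) for p in image_paths],
                cmap="YlGnBu", annot=True, fmt=".2f")
    plt.xlabel("Categories")
    plt.ylabel("Images")
    plt.title("Image-Text Similarity Matrix")
    plt.tight_layout()
    plt.savefig(out_path, dpi=200, bbox_inches="tight")
    plt.close()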

