Background
In supervised multi-modal retrieval, the similarity matrix S is usually constructed from the labels.
Given a sample set $X=\{x_i\}_{i=1}^n$ with a corresponding label set $L=\{l_i\}_{i=1}^n$, two samples $x_i$ and $x_j$ are considered similar if $l_i$ and $l_j$ share at least one label, i.e. $l_i^T l_j > 0$. The similarity matrix S is then defined as

$$S_{i,j}=\begin{cases} 1, & x_i \text{ and } x_j \text{ are similar} \\ 0, & x_i \text{ and } x_j \text{ are not similar} \end{cases}$$
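As a concrete illustration, here is a minimal NumPy sketch of this construction (the toy label matrix `L` below is made up; one multi-hot row per sample):

```python
import numpy as np

# Hypothetical multi-hot labels: 4 samples, 3 classes (one row per sample).
L = np.array([[1, 0, 1],
              [0, 1, 0],
              [1, 1, 0],
              [0, 0, 1]])

# S[i, j] = 1 iff samples i and j share at least one label, i.e. l_i^T l_j > 0.
S = (L @ L.T > 0).astype(int)
print(S)
```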
Problem
In the supervised setting, the similarity matrix is generally computed from label information. What I want to do here is the opposite direction, in the unsupervised setting: after constructing a similarity matrix S with whatever tricks are available, recover sample labels from it (such that the recovered labels reproduce the same S), and then use those labels in later stages.
Idea 1: Learn-to-Label
Treat the labels as parameters and learn them with a network, just as one learns hash codes; for the constraint, use the pairwise likelihood from DLFH [1], or see DCMH [2].
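As a sketch of the objective the code below implements (a DLFH/DCMH-style pairwise likelihood; the exact form here is my reading of the `struct` function): with learned label vectors $l_i$, let $\theta_{ij} = \frac{1}{2} l_i^T l_j$ and minimize the per-pair negative log-likelihood

$$-\log p(S_{ij}\mid\theta_{ij}) = \begin{cases} -\log \sigma(\theta_{ij}), & S_{ij}=1 \\ -\log\bigl(1-\sigma(\theta_{ij})\bigr), & S_{ij}=0 \end{cases}$$

which, after mapping $S_{ij}\in\{0,1\}$ to $\hat s_{ij}=2S_{ij}-1$, is exactly the $-\log\bigl(\tfrac{1}{2}(1-\hat s_{ij}) + \hat s_{ij}\,\sigma(\theta_{ij})\bigr)$ computed in `struct`.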
code
from tensorflow import ConfigProto, Session
from keras.backend.tensorflow_backend import set_session
config = ConfigProto()
config.gpu_options.allow_growth = True
set_session(Session(config=config))
import os
import argparse
from time import time
import numpy as np
import keras
import keras.backend as K
from keras.callbacks import Callback
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Input, Lambda
np.random.seed(int(time()))
parser = argparse.ArgumentParser()
parser.add_argument('--EPOCH', type=int, default=100)
parser.add_argument('--BATCH', type=int, default=64)
parser.add_argument('--SRC', type=str, default='data')
opt = parser.parse_args()
print(opt)
def load_data(fname):
return np.load(os.path.join(opt.SRC, f'{fname}.npy'))
I_train = load_data('I_train')
I_val = load_data('I_val')
L_train = load_data('L_train')
L_val = load_data('L_val')
EPOCH = opt.EPOCH
BATCH = opt.BATCH
N_CLASS = L_train.shape[-1]
DIM_IMG = I_train.shape[-1]
@K.tf.custom_gradient
def Htanh(x):  # hard tanh: sign(x) in the forward pass, straight-through gradient clipped to [-1, 1]
def grad(dy):
cond = (x >= -1) & (x <= 1)
zeros = K.zeros_like(dy)
return K.tf.where(cond, dy, zeros)
return K.sign(x), grad
def gen_data(which='train', bat_sz=BATCH):
if which == "train":
Img, Lab = I_train, L_train
elif which == "test":
Img, Lab = I_val, L_val
num = Lab.shape[0]
S = (np.dot(Lab, Lab.T) > 0).astype(np.float32)
while True:
        idx = np.random.choice(num, bat_sz)
        # use the within-batch pairwise similarity (bat_sz x bat_sz),
        # matching the `similarity_matrix` input and the (BATCH, BATCH) theta in the loss
        im, sim, lb = Img[idx], S[idx][:, idx], Lab[idx]
        yield [im, sim], lb
# network
in_img = Input([DIM_IMG], name='image')
in_sim = Input([BATCH], name='similarity_matrix')
x = in_img
x = Dense(N_CLASS)(x)
x = Lambda(Htanh)(x)
l = x
m_train = Model([in_img, in_sim], l, name='train')
clf = Model(in_img, l, name='classifier')
def struct(y_true, y_pred):  # DLFH-style negative log-likelihood over within-batch pairs
s_hat = in_sim * 2. - 1.
theta = 0.5 * K.dot(y_pred, K.transpose(y_pred))
loss = - K.log(0.5 * (1. - s_hat) + s_hat * K.sigmoid(theta) + 1e-9)
return K.sum(loss)
S_val = (np.dot(L_train, L_train.T) > 0).astype(np.int)  # ground-truth similarity over the training set, used for monitoring
tot = L_train.shape[0] ** 2
def test():
l = clf.predict(I_train)
sv = (np.dot(l, l.T) > 0).astype(np.int)
# print('right:', np.sum(sv * S_val))
# print('wrong:', np.sum(sv * (1 - S_val)))
# print('--- S_val ---\n', S_val)
# print(' --- sv ---\n', sv)
    print('S_val:', np.sum(S_val))  # number of ground-truth similar pairs
    print('sv:', np.sum(sv))  # number of predicted similar pairs
    print('total:', tot)  # total number of pairs
    l_sum = np.sum(l, axis=-1)  # number of 1s in each predicted label
    L_sum = np.sum(L_train, axis=-1)
    print('mean discrepancy:', np.mean(np.abs(l_sum - L_sum)))
class moniter(Callback):
def on_epoch_end(self, epoch, logs=None):
if epoch % 10 == 9 or epoch == 0:
print(epoch, "> "*10)
test()
m_train.compile('adam', loss=struct)
gen_train = gen_data('train')
gen_test = gen_data('test')
m_train.fit_generator(gen_train,
steps_per_epoch=I_train.shape[0]//BATCH,
epochs=EPOCH,
callbacks=[moniter()],
validation_data=gen_test,
verbose=0,
validation_steps=I_val.shape[0]//BATCH)
print('--- after ---')
# test()
clf.compile('adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
print(clf.evaluate(I_train, L_train))
Idea 2
Construct the labels through a deterministic procedure: view the similarity matrix S as an undirected graph and greedily color (label) it. In effect, every edge (similarity relation) is covered by some clique of mutually similar samples, each clique becomes one class, and a sample's multi-hot label is the set of cliques it belongs to.
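To make the goal concrete before the actual algorithm, here is a minimal hand-worked sketch (the tiny S and the clique cover below are made up): if every edge of the graph is covered by a clique and each clique is turned into a class, the resulting labels reproduce S.

```python
import numpy as np

# Toy symmetric similarity matrix: samples 0-2 form a clique, sample 3 is isolated.
S = np.array([[1, 1, 1, 0],
              [1, 1, 1, 0],
              [1, 1, 1, 0],
              [0, 0, 0, 1]])

# Edge clique cover {0, 1, 2} and {3} -> two classes -> multi-hot labels.
lab = np.array([[1, 0],
                [1, 0],
                [1, 0],
                [0, 1]])

# The reconstructed labels give back the original S.
assert ((lab @ lab.T > 0).astype(int) == S).all()
```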
sample
code
from os.path import join
from scipy.spatial.distance import cdist
import numpy as np
def load_data(fname):
return np.load(join('data', f'{fname}.npy'))
# L_train = load_data('L_train')
# L_val = load_data('L_val')
# L_ret = load_data('L_ret')
def construct_label(S):
    N = S.shape[0]  # number of samples
    vis_edge = np.identity(N, dtype=np.int)  # np.zeros_like(S)  # edges (similarity relations) already covered
    class_set = {}  # class_set[i]: sample ids belonging to class i
    class_id = 0  # total number of classes so far
    # Handle isolated points (samples similar only to themselves):
    # each of them still needs a label of its own.
    s_sum = np.sum(S, axis=-1)
    for i in range(N):
        if s_sum[i] == 1:
            vis_edge[i][i] = 0
for now in range(N):
        # set of neighbours of `now` whose edges are not yet covered
        neighbour = set()
        for v in range(N):
            if S[now][v] == 1 and vis_edge[now][v] == 0:
                neighbour.add(v)
        while len(neighbour) > 0:
            v = neighbour.pop()
            if vis_edge[now][v] == 1:  # this edge has already been covered
                continue
            vis_edge[now][v] = vis_edge[v][now] = 1
            elem = {now, v}  # element set (clique) of the current class
            for u in neighbour:
                # Check whether another neighbour of `now` can join the current class:
                # it must be adjacent to every element already in the set.
                can_in = True
                for e in elem:
                    if S[u][e] != 1:
                        can_in = False
                        break
                if can_in:
                    elem.add(u)
                    for e in elem:
                        vis_edge[u][e] = vis_edge[e][u] = 1
            class_set[class_id] = elem
            class_id += 1
            neighbour = neighbour - elem
    label = np.zeros((N, class_id))
    for lb in class_set:
        for x in class_set[lb]:
            label[x][lb] = 1
    # Verify that every similarity relation has been covered
    for i in range(S.shape[0]):
        for j in range(S.shape[1]):
            if S[i][j] != vis_edge[i][j]:
                print(f's & vis_edge diff: ({i}, {j})')
    return label
if __name__ == '__main__':
    # Corresponds to the sample figure above.
    # Hand-written labels for the sample.
    lab = np.array([
        [1, 1, 0, 0, 0, 0, 0, 0, 0],  # (1, 2)
        [1, 0, 1, 1, 0, 0, 0, 0, 0],  # (1, 3, 4)
        [0, 0, 1, 0, 1, 0, 0, 0, 0],  # (3, 5)
        [0, 0, 0, 0, 1, 0, 0, 0, 0],  # (5)
        [0, 0, 0, 0, 1, 1, 0, 0, 0],  # (5, 6)
        [0, 0, 0, 0, 0, 0, 1, 0, 0],  # (7)
        [0, 0, 0, 1, 0, 1, 1, 1, 0],  # (4, 6, 7, 8)
        [0, 0, 0, 0, 0, 0, 0, 1, 0],  # (8)
        [0, 1, 0, 0, 0, 0, 0, 1, 0],  # (2, 8)
        [0, 0, 0, 0, 0, 0, 0, 0, 1]   # (9), isolated point
    ])
    # S computed from the hand-written labels
    sim = (np.dot(lab, lab.T) > 0).astype(np.int)
    # Hand-written S, written out directly from the sample figure
sim_my = np.array([
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
])
    ## Check that the hand-written S agrees with the S computed from the labels
# for r in range(sim_my.shape[0]):
# for c in range(sim_my.shape[1]):
# if sim_my[r][c] != sim[r][c]:
# print(f'my sim diff: ({r}, {c})')
# print('my sim finish')
# print(sim - np.identity(sim.shape[0]))
lab_hat = construct_label(sim)
sim_hat = (np.dot(lab_hat, lab_hat.T) > 0).astype(np.int)
print(lab.shape, lab_hat.shape, '\n', lab_hat)
print(sim.shape, sim_hat.shape, '\n', sim_hat)
    # Differences between the original S and the reconstructed S'
for r in range(sim_hat.shape[0]):
for c in range(sim_hat.shape[1]):
if sim_hat[r][c] != sim[r][c]:
print(f'sim diff: ({r}, {c})')
print('sim finish')
    # Differences between the original L and the reconstructed L'
for r in range(lab_hat.shape[0]):
for c in range(lab_hat.shape[1]):
if lab_hat[r][c] != lab[r][c]:
print(f'lab diff: ({r}, {c})')
print('lab finish')
Discussion
The first idea does not seem to work very well.
The second one looks feasible: on the toy sample I constructed, the reconstructed labels do not line up with the original class indices, but after permuting the columns they match the original exactly (i.e. they are isomorphic).
However, when testing on the labels of the Flickr-25K test set (2k samples), the reconstructed labels do reproduce the original S, but they are much longer, i.e. far more classes are created: the original label set has only 24 classes, while the construction produces 3412. Moreover, the label dimensionality obtained from the training set differs from the one obtained from the test set… so this construction is clearly rather weak. A sketch of this check is given below.
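The check could be reproduced roughly as follows, reusing `load_data` and `construct_label` from the code above (the `L_val.npy` file name is an assumption, matching the commented-out loads):

```python
# Sketch of the Flickr-25K check; 'L_val' is assumed to hold the test-set label matrix under data/.
L_val = load_data('L_val')
S = (np.dot(L_val, L_val.T) > 0).astype(np.int)  # similarity from the true labels
lab_hat = construct_label(S)                     # reconstructed labels
S_hat = (np.dot(lab_hat, lab_hat.T) > 0).astype(np.int)
print('true classes:', L_val.shape[1], '| constructed classes:', lab_hat.shape[1])
print('S reproduced:', np.array_equal(S, S_hat))
```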
Also, the second idea has only been tested on undirected graphs, i.e. with a symmetric S. An S computed from labels in the supervised setting satisfies this, but an S constructed in the unsupervised setting may be asymmetric, as in [3], and it is unclear whether the method still applies in that case.
Future Work
- A better construction algorithm, so that the number of constructed classes does not blow up so badly (ideally the constructed labels would have the same length as the true labels, or even map uniquely back to the original labels, as in the simple sample constructed above).
- A construction method that handles an asymmetric S.
References
- [1] Discrete Latent Factor Model for Cross-Modal Hashing
- [2] Deep Cross-Modal Hashing
- [3] Unsupervised Generative Adversarial Cross-Modal Hashing