我已经实现了一个函数,用于将np.dot 应用于从内存映射数组显式读入核心内存的块:
import numpy as np
def _block_slices(dim_size, block_size):
"""Generator that yields slice objects for indexing into
sequential blocks of an array along a particular axis
"""
count = 0
while True:
yield slice(count, count + block_size, 1)
count += block_size
if count > dim_size:
raise StopIteration
def blockwise_dot(A, B, max_elements=int(2**27), out=None):
    """
    Computes the dot product of two matrices in a block-wise fashion.
    Only blocks of `A` with a maximum size of `max_elements` will be
    processed simultaneously.

    Parameters
    ----------
    A : array_like, shape (m, n)
        Left operand; typically a large ``np.memmap`` array.
    B : array_like, shape (n, o)
        Right operand, assumed to fit in core memory.
    max_elements : int, optional
        Upper bound on the number of elements of `A` read into core
        memory at once.
    out : ndarray, shape (m, o), optional
        Pre-allocated output array.

    Returns
    -------
    out : ndarray, shape (m, o)

    Raises
    ------
    ValueError
        If the inner dimensions of `A` and `B` disagree, or if `out`
        has the wrong shape.
    """
    m, n = A.shape
    n1, o = B.shape
    if n1 != n:
        raise ValueError('matrices are not aligned')

    # Integer division (//) is required here: under Python 3 true
    # division `/` would yield floats, which are invalid slice bounds.
    if A.flags.f_contiguous:
        # prioritize processing as many columns of A as possible
        max_cols = max(1, max_elements // m)
        max_rows = max(1, max_elements // max_cols)
    else:
        # prioritize processing as many rows of A as possible
        max_rows = max(1, max_elements // n)
        max_cols = max(1, max_elements // max_rows)

    if out is None:
        out = np.empty((m, o), dtype=np.result_type(A, B))
    elif out.shape != (m, o):
        raise ValueError('output array has incorrect dimensions')

    for mm in _block_slices(m, max_rows):
        out[mm, :] = 0
        for nn in _block_slices(n, max_cols):
            A_block = A[mm, nn].copy()  # copy to force a read into core
            out[mm, :] += np.dot(A_block, B[nn, :])
            del A_block  # release the block before reading the next one

    return out
然后我做了一些基准测试,将我的blockwise_dot 函数与直接应用于内存映射数组的普通np.dot 函数进行比较(请参阅下面的基准测试脚本)。我正在使用与 OpenBLAS v0.2.9.rc1 链接的 numpy 1.9.0.dev-205598b(从源代码编译)。该机器是运行 Ubuntu 13.10 的四核笔记本电脑,具有 8GB RAM 和 SSD,我已禁用交换文件。
结果
正如 @Bi Rico 预测的那样,计算点积所需的时间相对于 A 的维度大致按 O(n) 增长。对 A 的缓存块进行操作,比直接在整个内存映射数组上调用普通的 np.dot 函数有巨大的性能提升:
令人惊讶的是,结果对所处理块的大小并不敏感——以 1GB、2GB 或 4GB 的块处理数组所花费的时间几乎没有差别。我的结论是:无论 np.memmap 数组本机实现了怎样的缓存策略,对于计算点积来说它似乎都不是最优的。
其他问题
不得不手动实现这种缓存策略仍然有点痛苦,因为我的代码可能必须在具有不同物理内存量和可能不同操作系统的机器上运行。出于这个原因,我仍然对是否有办法控制内存映射数组的缓存行为以提高np.dot 的性能感兴趣。
我在运行基准测试时注意到了一些奇怪的内存处理行为 - 当我在整个 A 上调用 np.dot 时,我从未见过我的 Python 进程的驻留集大小超过大约 3.8GB,即使我有大约 7.5GB 的可用 RAM。这让我怀疑np.memmap 数组允许占用的物理内存量有一些限制——我之前假设它会使用操作系统允许它抓取的任何 RAM。就我而言,能够提高此限制可能非常有益。
是否有人对 np.memmap 数组的缓存行为有任何进一步的了解,有助于解释这一点?
基准测试脚本
def generate_random_mmarray(shape, fp, max_elements):
    """Create a float32 memory-mapped array of the given shape filled
    with standard-normal random values, writing it block-by-block so
    that at most ``max_elements`` elements are generated in core at a
    time.

    Parameters
    ----------
    shape : tuple of int
        (rows, cols) of the array to create.
    fp : file or str
        Open file object (or path) backing the memmap.
    max_elements : int
        Upper bound on the number of elements per in-core block.

    Returns
    -------
    np.memmap
        The (still open) memory-mapped array.
    """
    A = np.memmap(fp, dtype=np.float32, mode='w+', shape=shape)
    # Integer division (//) keeps block bounds valid as slice indices
    # under Python 3 true division as well as Python 2.
    max_rows = max(1, max_elements // shape[1])
    max_cols = max(1, max_elements // max_rows)
    for rr in _block_slices(shape[0], max_rows):
        for cc in _block_slices(shape[1], max_cols):
            A[rr, cc] = np.random.randn(*A[rr, cc].shape)
    return A
def run_bench(n_gigabytes=np.array([16]), max_block_gigabytes=6, reps=3,
              fpath='temp_array'):
    """
    time C = A * B, where A is a big (n, n) memory-mapped array, and B and C are
    (n, o) arrays resident in core memory

    For each requested size, times plain ``np.dot`` on the memmap versus
    ``blockwise_dot``, keeping the best of ``reps`` runs of each, and
    records the L2 norm of the difference between the two results.

    NOTE(review): Python 2 syntax (print statements, xrange); assumes
    ``time`` and ``os`` are imported elsewhere in the file.
    """
    standard_times = []
    blockwise_times = []
    differences = []
    # requested sizes are in GiB; convert to bytes
    nbytes = n_gigabytes * 2 ** 30
    o = 64
    # float32 elements
    max_elements = int((max_block_gigabytes * 2 ** 30) / 4)
    for nb in nbytes:
        # float32 elements
        n = int(np.sqrt(nb / 4))
        with open(fpath, 'w+') as f:
            A = generate_random_mmarray((n, n), f, (max_elements / 2))
            B = np.random.randn(n, o).astype(np.float32)
            print "\n" + "-"*60
            print "A: %s\t(%i bytes)" %(A.shape, A.nbytes)
            print "B: %s\t\t(%i bytes)" %(B.shape, B.nbytes)
            # best-of-reps timing for the plain memmap dot product
            best = np.inf
            for _ in xrange(reps):
                tic = time.time()
                res1 = np.dot(A, B)
                t = time.time() - tic
                best = min(best, t)
            print "Normal dot:\t%imin %.2fsec" %divmod(best, 60)
            standard_times.append(best)
            # best-of-reps timing for the explicit block-wise version
            best = np.inf
            for _ in xrange(reps):
                tic = time.time()
                res2 = blockwise_dot(A, B, max_elements=max_elements)
                t = time.time() - tic
                best = min(best, t)
            print "Block-wise dot:\t%imin %.2fsec" %divmod(best, 60)
            blockwise_times.append(best)
            # sanity check: the two methods should agree numerically
            diff = np.linalg.norm(res1 - res2)
            print "L2 norm of difference:\t%g" %diff
            differences.append(diff)
        # free the large arrays before the next (bigger) iteration to
        # keep peak memory down, then remove the backing file
        del A, B
        del res1, res2
        os.remove(fpath)
    return (np.array(standard_times), np.array(blockwise_times),
            np.array(differences))
if __name__ == '__main__':
    # log-spaced powers of two: benchmarks 4, 8, 16 and 32 GiB arrays
    n = np.logspace(2,5,4,base=2)
    standard_times, blockwise_times, differences = run_bench(
        n_gigabytes=n,
        max_block_gigabytes=4)
    # persist the timings for later plotting/analysis
    np.savez('bench_results', standard_times=standard_times,
             blockwise_times=blockwise_times, differences=differences)