# Copyright (c) 2023, NVIDIA CORPORATION.  All rights reserved.

"""Multi-process & multi-node version of Faiss's index.add().

This class inherits from FaissBaseIndex, and optimizes the 'add()' method by
making it multi-node and multi-process, with bit-wise equivalence to
FaissBaseIndex. This allows 'add()' to scale out to very large datasets, since
the vast majority of the computational effort is embarrassingly parallel.
"""

import numpy as np
import os
import shutil
import torch
from tqdm import tqdm

from megatron import get_retro_args, print_rank_0
from tools.bert_embedding import BertEmbedder
from tools.bert_embedding.utils import get_missing_blocks_by_rank
from tools.retro.external_libs import faiss, h5py
from tools.retro.index.utils import get_added_codes_dir, get_added_code_paths

from .faiss_base import FaissBaseIndex


class FaissParallelAddIndex(FaissBaseIndex):
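    '''Index whose add() runs as a multi-rank encode plus a rank-0 merge.

    encode() embeds and sa_encode()'s blocks of the text dataset on all
    ranks and saves the codes to disk; add_codes() then merges the saved
    codes into the index on rank 0; add() ties the two steps together and
    cleans up the intermediate code files.
    '''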

    def encode_block(self, index, embedder, text_dataset, block):
        '''Encode sub-dataset block, to be later added to index.

        Encode one block of the data subset (generally ~1M vectors per
        block): embed the block's text, compute codes via index.sa_encode(),
        and save the resulting codes to disk.
        '''

        args = get_retro_args()

        # Embed block.
        embeddings = self.embed_text_dataset_block(
            embedder,
            text_dataset,
            block["range"],
        )

        # Encode block.
        print_rank_0("encode.")
        codes = index.sa_encode(embeddings)

        # Save codes.
        print_rank_0("save codes.")
        os.makedirs(os.path.dirname(block["path"]), exist_ok=True)
        with h5py.File(block["path"], "w") as f:
            f.create_dataset("data", data=codes)

    def encode(self, text_dataset):
        '''Encode text dataset, to be later added to index.'''

        args = get_retro_args()
        codes_dir = get_added_codes_dir()

        # Load empty/trained index.
        index = self.get_empty_index()

        # Bert embedder.
        embedder = BertEmbedder(args.retro_bert_batch_size,
                                args.retro_bert_max_chunk_length,
                                args.bert_embedder_type)

        # Missing code blocks, partitioned across ranks ('None' entries keep
        # all ranks iterating in step).
        def validate(f):
            assert len(f["data"].shape) == 2
        n_missing_blocks, missing_code_blocks = get_missing_blocks_by_rank(
            codes_dir,
            len(text_dataset),
            args.retro_block_size,
            validate=validate,
        )

        # Encode each block.
        for block_index, block in enumerate(missing_code_blocks):

            if block is not None:

                # Progress.
                print_rank_0("encode block %d / %d ... %s." % (
                    block_index,
                    len(missing_code_blocks),
                    block["path"],
                ))

                # Encode block.
                self.encode_block(index, embedder, text_dataset, block)

            # Synchronize progress across all ranks (for easier observation).
            print_rank_0(" > waiting for other ranks to finish block.")
            torch.distributed.barrier()

    def add_codes(self):
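        '''Merge saved code blocks into the trained index (rank 0 only).'''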

        if torch.distributed.get_rank() != 0:
            return

        added_index_path = self.get_added_index_path()
        if os.path.exists(added_index_path):
            return

        # Load empty/trained index.
        print_rank_0("read empty index.")
        index = self.get_empty_index()
        index_ivf = faiss.extract_index_ivf(index)

        # Add codes.
        print_rank_0("add codes.")
        code_paths = get_added_code_paths()
        for code_path in tqdm(code_paths, "add codes"):
            with h5py.File(code_path, "r") as f:
                codes = np.copy(f["data"])
                index_ivf.add_sa_codes(codes)

        # Update index's ntotal.
        index.ntotal = index_ivf.ntotal

        # Write index.
        print_rank_0("write added index.")
        faiss.write_index(index, added_index_path)

    def remove_codes(self):
        '''Remove added codes after adding to index.'''
        if torch.distributed.get_rank() != 0:
            return
        assert os.path.isfile(self.get_added_index_path())
        shutil.rmtree(get_added_codes_dir(), ignore_errors=True)

    def add(self, text_dataset):
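        '''Encode text dataset on all ranks, then add codes to index on rank 0.'''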

        # Check if index already exists.
        if not os.path.isfile(self.get_added_index_path()):

            # Encode chunks.
            self.encode(text_dataset)

            # Add codes to index.
            self.add_codes()

        # Wait for (single-process) adding to complete.
        torch.distributed.barrier()

        # Remove codes.
        self.remove_codes()
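

# A minimal usage sketch (illustrative only; the real entry points live in the
# surrounding Retro index-build pipeline). It assumes torch.distributed and the
# retro args are already initialized, 'text_dataset' is the chunk text dataset
# to be indexed, and FaissBaseIndex supplies the train() step used before add():
#
#   index = FaissParallelAddIndex()
#   index.train(text_dataset)   # train the empty index (see FaissBaseIndex)
#   index.add(text_dataset)     # encode on all ranks; merge & write on rank 0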