IndexScalarQuantizer.h
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD+Patents license found in the
* LICENSE file in the root directory of this source tree.
*/

#ifndef FAISS_INDEX_SCALAR_QUANTIZER_H
#define FAISS_INDEX_SCALAR_QUANTIZER_H

#include <stdint.h>
#include <vector>

#include "IndexIVF.h"

namespace faiss {

/**
 * The uniform quantizer has a range [vmin, vmax]. The range can be
 * the same for all dimensions (uniform) or specific per dimension
 * (default).
 */
struct ScalarQuantizer {

    enum QuantizerType {
        QT_8bit,         ///< 8 bits per component
        QT_4bit,         ///< 4 bits per component
        QT_8bit_uniform, ///< same, shared range for all dimensions
        QT_4bit_uniform,
    };

    QuantizerType qtype;

    /** The uniform encoder can estimate the range of representable
     * values using different statistics. Below, rs = rangestat_arg. */
    enum RangeStat {
        RS_minmax,    ///< [min - rs*(max-min), max + rs*(max-min)]
        RS_meanstd,   ///< [mean - std * rs, mean + std * rs]
        RS_quantiles, ///< [Q(rs), Q(1-rs)]
        RS_optim,     ///< alternate optimization of reconstruction error
    };

    RangeStat rangestat;
    float rangestat_arg;

    /// dimension of input vectors
    size_t d;

    /// bytes per encoded vector
    size_t code_size;

    /// trained values (including the range)
    std::vector<float> trained;

    ScalarQuantizer (size_t d, QuantizerType qtype);
    ScalarQuantizer ();

    void train (size_t n, const float *x);

    /// encode a set of n vectors into codes
    void compute_codes (const float *x,
                        uint8_t *codes,
                        size_t n) const;

    /// decode n vectors from their codes
    void decode (const uint8_t *code, float *x, size_t n) const;

    // fast, non thread-safe way of computing vector-to-code and
    // code-to-code distances
    struct DistanceComputer {

        /// vector-to-code distance computation
        virtual float compute_distance (const float *x,
                                        const uint8_t *code) = 0;

        /// code-to-code distance computation
        virtual float compute_code_distance (const uint8_t *code1,
                                             const uint8_t *code2) = 0;

        virtual ~DistanceComputer () {}
    };

    DistanceComputer *get_distance_computer (MetricType metric = METRIC_L2)
        const;

};
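
/* Example (illustrative sketch, not part of the original header): training a
 * ScalarQuantizer and round-tripping vectors through encode/decode. The
 * values n, d and the float array xb (n * d values) are assumed to be
 * provided by the caller.
 *
 *   faiss::ScalarQuantizer sq(d, faiss::ScalarQuantizer::QT_8bit);
 *   sq.train(n, xb);                             // estimate the value range
 *
 *   std::vector<uint8_t> codes(n * sq.code_size);
 *   sq.compute_codes(xb, codes.data(), n);       // encode n vectors
 *
 *   std::vector<float> decoded(n * d);
 *   sq.decode(codes.data(), decoded.data(), n);  // approximate reconstruction
 */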

struct IndexScalarQuantizer: Index {

    /// Used to encode the vectors
    ScalarQuantizer sq;

    /// Codes. Size ntotal * code_size
    std::vector<uint8_t> codes;

    size_t code_size;

    /** Constructor.
     *
     * @param d       dimensionality of the input vectors
     * @param qtype   type of scalar quantizer (e.g. QT_8bit)
     * @param metric  distance metric used for search
     */
    IndexScalarQuantizer (int d,
                          ScalarQuantizer::QuantizerType qtype,
                          MetricType metric = METRIC_L2);

    IndexScalarQuantizer ();

    void train(idx_t n, const float* x) override;

    void add(idx_t n, const float* x) override;

    void search(
        idx_t n,
        const float* x,
        idx_t k,
        float* distances,
        idx_t* labels) const override;

    void reset() override;

    void reconstruct_n(idx_t i0, idx_t ni, float* recons) const override;

    void reconstruct(idx_t key, float* recons) const override;

};
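
/* Example (illustrative sketch, not part of the original header): building a
 * flat scalar-quantizer index and running a k-NN search. The values nb, nq,
 * d, k and the arrays xb (database) and xq (queries) are assumed.
 *
 *   faiss::IndexScalarQuantizer index(
 *       d, faiss::ScalarQuantizer::QT_8bit, faiss::METRIC_L2);
 *   index.train(nb, xb);          // learn the quantization ranges
 *   index.add(nb, xb);            // encode and store the database vectors
 *
 *   std::vector<float> distances(nq * k);
 *   std::vector<faiss::Index::idx_t> labels(nq * k);
 *   index.search(nq, xq, k, distances.data(), labels.data());
 */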

/** An IVF implementation where the components of the residuals are
 * encoded with a scalar uniform quantizer. All distance computations
 * are asymmetric, so the encoded vectors are decoded and approximate
 * distances are computed.
 */
struct IndexIVFScalarQuantizer: IndexIVF {

    ScalarQuantizer sq;

    IndexIVFScalarQuantizer (Index *quantizer, size_t d, size_t nlist,
                             ScalarQuantizer::QuantizerType qtype,
                             MetricType metric = METRIC_L2);

    IndexIVFScalarQuantizer ();

    void train_residual(idx_t n, const float* x) override;

    void add_with_ids(idx_t n, const float* x, const long* xids) override;

    void search_preassigned (idx_t n, const float *x, idx_t k,
                             const idx_t *assign,
                             const float *centroid_dis,
                             float *distances, idx_t *labels,
                             bool store_pairs) const override;

    void reconstruct_from_offset (long list_no, long offset,
                                  float* recons) const override;

};
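
/* Example (illustrative sketch, not part of the original header): an IVF index
 * with scalar-quantized residuals. IndexFlatL2 (declared in IndexFlat.h) is
 * assumed to serve as the coarse quantizer; nlist, nb, nq, d, k, xb and xq
 * are assumed to be defined by the caller.
 *
 *   faiss::IndexFlatL2 coarse_quantizer(d);
 *   faiss::IndexIVFScalarQuantizer index(
 *       &coarse_quantizer, d, nlist,
 *       faiss::ScalarQuantizer::QT_8bit, faiss::METRIC_L2);
 *   index.train(nb, xb);    // trains the coarse quantizer and residual ranges
 *   index.add(nb, xb);      // assign to inverted lists and encode residuals
 *
 *   index.nprobe = 8;       // number of inverted lists visited per query
 *   std::vector<float> distances(nq * k);
 *   std::vector<faiss::Index::idx_t> labels(nq * k);
 *   index.search(nq, xq, k, distances.data(), labels.data());
 */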
}
#endif