37 changes: 37 additions & 0 deletions test/quantization/core/experimental/test_quantizer.py
@@ -184,6 +184,43 @@ def test_dequantize_quantize_rand_b6(self):

self.assertTrue(torch.equal(original_input, result))

r""" Tests for correct dimensions in dequantize_apot result
on random 3-dim tensor with random dimension sizes
and hardcoded values for b, k.
Dequant an input tensor and verify that
dimensions are same as input.
* tensor2quantize: Tensor
* b: 4
* k: 2
"""
def test_dequantize_dim(self):
# make observer
observer = APoTObserver(4, 2)

# generate random dimension sizes for tensor2quantize, each between 1 and 20
size1 = random.randint(1, 20)
size2 = random.randint(1, 20)
size3 = random.randint(1, 20)

# make tensor2quantize: random fp values between 0 and 1000
tensor2quantize = 1000 * torch.rand(size1, size2, size3, dtype=torch.float)

observer.forward(tensor2quantize)

alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)

# make apot_tensor by quantizing the input
original_apot = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)

# dequantize apot_tensor
dequantize_result = dequantize_APoT(apot_tensor=original_apot)

self.assertEqual(original_apot.data.size(), dequantize_result.size())

def test_q_apot_alpha(self):
with self.assertRaises(NotImplementedError):
APoTQuantizer.q_apot_alpha(self)
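For context on what the new test_dequantize_dim exercises, the same observer -> quantize_APoT -> dequantize_APoT round trip can be run as a standalone sketch. The import paths below are assumptions based on the experimental module layout this PR touches (only quantizer.py appears in the diff); the b=4, k=2 parameters and the keyword arguments mirror the test above.

import torch
from torch.ao.quantization.experimental.observer import APoTObserver  # assumed path
from torch.ao.quantization.experimental.quantizer import quantize_APoT, dequantize_APoT  # assumed path

# observer with the same hardcoded APoT parameters as the test: b=4, k=2
observer = APoTObserver(4, 2)

# random 3-dim input with fp values between 0 and 1000
tensor2quantize = 1000 * torch.rand(4, 5, 6, dtype=torch.float)
observer.forward(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)

# quantize to an APoT tensor, then dequantize it back to fp
apot_tensor = quantize_APoT(tensor2quantize=tensor2quantize,
                            alpha=alpha,
                            gamma=gamma,
                            quantization_levels=quantization_levels,
                            level_indices=level_indices)
dequantize_result = dequantize_APoT(apot_tensor=apot_tensor)

# with the reshape fix below, the dequantized tensor keeps the input's dimensions
assert dequantize_result.size() == tensor2quantize.size()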
15 changes: 9 additions & 6 deletions torch/ao/quantization/experimental/quantizer.py
@@ -57,15 +57,18 @@ def quantize(self, tensor2quantize: Tensor):
result: fp representation of input Tensor
"""
def dequantize(self, apot_tensor) -> Tensor:
-        apot_tensor_data = apot_tensor.data
+        orig_size = apot_tensor.data.size()
+        apot_tensor_data = apot_tensor.data.flatten()

+        print(apot_tensor_data)
+
         # map apot_to_float over tensor2quantize elements
-        result_temp = np.empty(apot_tensor_data.size())
-        for ele in apot_tensor_data:
-            new_ele = apot_to_float(ele, self.quantization_levels, self.level_indices)
-            np.append(result_temp, new_ele)
+        result_temp = np.empty(shape=apot_tensor_data.size())
+        for i in range(len(apot_tensor_data)):
+            new_ele = apot_to_float(apot_tensor_data[i], self.quantization_levels, self.level_indices)
+            result_temp[i] = new_ele

-        result = torch.from_numpy(result_temp).int()
+        result = torch.from_numpy(result_temp).reshape(orig_size)

return result

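The substance of the quantizer.py change is a shape-preserving element-wise mapping: record the input's size, flatten, convert each element with apot_to_float, write it into a preallocated buffer, and reshape the result back to the original size. The previous loop called np.append(result_temp, new_ele), which returns a new array and never modifies result_temp in place, so the converted values were silently discarded. A minimal sketch of the same pattern follows, with a hypothetical stand-in for apot_to_float since that helper is not part of this diff.

import numpy as np
import torch

def convert_stub(x):
    # hypothetical stand-in for apot_to_float(x, quantization_levels, level_indices)
    return float(x) * 0.5

def map_preserving_shape(data: torch.Tensor) -> torch.Tensor:
    orig_size = data.size()
    flat = data.flatten()

    # preallocate the output and write each converted element in place
    result_temp = np.empty(shape=flat.size())
    for i in range(len(flat)):
        result_temp[i] = convert_stub(flat[i])

    # reshape restores the dimensions of the original (unflattened) input
    return torch.from_numpy(result_temp).reshape(orig_size)

out = map_preserving_shape(torch.rand(2, 3, 4))
assert out.size() == torch.Size([2, 3, 4])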