Unverified Commit e12d200c authored by Aleksei Nikiforov, committed by GitHub

S390x big endian fixes (#8149)

Fixes for multiple tests on s390x
parent 526ec93e
@@ -2,6 +2,7 @@ import math
 import os
 import random
 import re
+import sys
 from functools import partial
 
 import numpy as np
@@ -614,7 +615,7 @@ class TestToPil:
         img_data_short = torch.ShortTensor(1, 4, 4).random_()
         expected_output = img_data_short.numpy()
-        yield img_data_short, expected_output, "I;16"
+        yield img_data_short, expected_output, "I;16" if sys.byteorder == "little" else "I;16B"
 
         img_data_int = torch.IntTensor(1, 4, 4).random_()
         expected_output = img_data_int.numpy()
@@ -631,7 +632,7 @@ class TestToPil:
         img_data_short = torch.ShortTensor(4, 4).random_()
         expected_output = img_data_short.numpy()
-        yield img_data_short, expected_output, "I;16"
+        yield img_data_short, expected_output, "I;16" if sys.byteorder == "little" else "I;16B"
 
         img_data_int = torch.IntTensor(4, 4).random_()
         expected_output = img_data_int.numpy()
@@ -662,7 +663,7 @@ class TestToPil:
         [
             (torch.Tensor(4, 4, 1).uniform_().numpy(), "L"),
             (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), "L"),
-            (torch.ShortTensor(4, 4, 1).random_().numpy(), "I;16"),
+            (torch.ShortTensor(4, 4, 1).random_().numpy(), "I;16" if sys.byteorder == "little" else "I;16B"),
             (torch.IntTensor(4, 4, 1).random_().numpy(), "I"),
         ],
     )
@@ -744,7 +745,7 @@ class TestToPil:
         [
             (torch.Tensor(4, 4).uniform_().numpy(), "L"),
             (torch.ByteTensor(4, 4).random_(0, 255).numpy(), "L"),
-            (torch.ShortTensor(4, 4).random_().numpy(), "I;16"),
+            (torch.ShortTensor(4, 4).random_().numpy(), "I;16" if sys.byteorder == "little" else "I;16B"),
             (torch.IntTensor(4, 4).random_().numpy(), "I"),
         ],
     )
......
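Context for the test changes above: PIL's raw 16-bit modes are byte-order specific ("I;16" is little-endian, "I;16B" is big-endian), while an in-memory numpy/torch int16 buffer follows the host byte order, so the expected mode now depends on sys.byteorder. A minimal, self-contained sketch of that relationship (the array values are arbitrary and only for illustration):

import sys

import numpy as np
from PIL import Image

# Native 16-bit pixel data: its in-memory byte order is the host's, so the
# matching PIL raw mode differs between little- and big-endian machines.
arr = np.arange(16, dtype=np.int16).reshape(4, 4)
mode = "I;16" if sys.byteorder == "little" else "I;16B"

# Reinterpret the raw buffer under the byte-order-appropriate 16-bit mode.
img = Image.frombytes(mode, (arr.shape[1], arr.shape[0]), arr.tobytes())
print(img.mode)                  # "I;16" on x86_64, "I;16B" on s390x
print(list(img.getdata())[:4])   # [0, 1, 2, 3] on either host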
@@ -510,15 +510,25 @@ def read_sn3_pascalvincent_tensor(path: str, strict: bool = True) -> torch.Tensor:
     # read
     with open(path, "rb") as f:
         data = f.read()
 
     # parse
-    magic = get_int(data[0:4])
-    nd = magic % 256
-    ty = magic // 256
+    if sys.byteorder == "little":
+        magic = get_int(data[0:4])
+        nd = magic % 256
+        ty = magic // 256
+    else:
+        nd = get_int(data[0:1])
+        ty = get_int(data[1:2]) + get_int(data[2:3]) * 256 + get_int(data[3:4]) * 256 * 256
+
     assert 1 <= nd <= 3
     assert 8 <= ty <= 14
     torch_type = SN3_PASCALVINCENT_TYPEMAP[ty]
     s = [get_int(data[4 * (i + 1) : 4 * (i + 2)]) for i in range(nd)]
+
+    if sys.byteorder == "big":
+        for i in range(len(s)):
+            s[i] = int.from_bytes(s[i].to_bytes(4, byteorder="little"), byteorder="big", signed=False)
+
     parsed = torch.frombuffer(bytearray(data), dtype=torch_type, offset=(4 * (nd + 1)))
 
     # The MNIST format uses the big endian byte order, while `torch.frombuffer` uses whatever the system uses. In case
......
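The hunk above touches the SN3/idx header parsing used for MNIST, whose 32-bit header fields are stored big-endian on disk. As a stdlib-only illustration (not the get_int helper used in the patch), decoding those fields with an explicit byte order gives the same result on little- and big-endian hosts; the header bytes below are a made-up example laid out like an MNIST image file:

import struct

# Magic 0x00000803 (element type 0x08 = unsigned byte, 3 dimensions),
# followed by the dimension sizes, all stored big-endian.
header = bytes([0x00, 0x00, 0x08, 0x03]) + struct.pack(">III", 60000, 28, 28)

magic = int.from_bytes(header[0:4], byteorder="big")
nd = magic % 256    # number of dimensions -> 3
ty = magic // 256   # element-type code    -> 0x08

sizes = [int.from_bytes(header[4 * (i + 1) : 4 * (i + 2)], byteorder="big") for i in range(nd)]
print(nd, hex(ty), sizes)  # 3 0x8 [60000, 28, 28], independent of sys.byteorder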
 import math
 import numbers
+import sys
 import warnings
 from enum import Enum
 from typing import Any, List, Optional, Tuple, Union
@@ -162,7 +163,7 @@ def to_tensor(pic) -> Tensor:
         return torch.from_numpy(nppic).to(dtype=default_float_dtype)
 
     # handle PIL Image
-    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
+    mode_to_nptype = {"I": np.int32, "I;16" if sys.byteorder == "little" else "I;16B": np.int16, "F": np.float32}
     img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))
 
     if pic.mode == "1":
@@ -285,7 +286,7 @@ def to_pil_image(pic, mode=None):
         if npimg.dtype == np.uint8:
             expected_mode = "L"
         elif npimg.dtype == np.int16:
-            expected_mode = "I;16"
+            expected_mode = "I;16" if sys.byteorder == "little" else "I;16B"
         elif npimg.dtype == np.int32:
             expected_mode = "I"
         elif npimg.dtype == np.float32:
......
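With the to_tensor and to_pil_image changes above, a 16-bit round trip keeps working on big-endian hosts because both directions now agree on the mode name. A short usage sketch, assuming a torchvision build that includes this patch (the tensor contents are arbitrary):

import torch
from torchvision.transforms import functional as F

# Convert a 16-bit single-channel tensor to a PIL image and back; the
# intermediate PIL mode follows the host byte order.
img_data = torch.ShortTensor(1, 4, 4).random_()
pil_img = F.to_pil_image(img_data)
print(pil_img.mode)  # "I;16" on little-endian hosts, "I;16B" on s390x

tensor_back = F.to_tensor(pil_img)  # int16 tensor of shape (1, 4, 4)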