Fixes for numpy 2.0. (#10252)

This commit is contained in:
Jiaming Yuan
2024-05-07 03:54:32 +08:00
committed by GitHub
parent dcc9639b91
commit 73afef1a6e
12 changed files with 35 additions and 34 deletions

View File

@@ -233,9 +233,9 @@ def _maybe_np_slice(data: DataType, dtype: Optional[NumpyDType]) -> np.ndarray:
        if not data.flags.c_contiguous:
            data = np.array(data, copy=True, dtype=dtype)
        else:
-            data = np.array(data, copy=False, dtype=dtype)
+            data = np.asarray(data, dtype=dtype)
    except AttributeError:
-        data = np.array(data, copy=False, dtype=dtype)
+        data = np.asarray(data, dtype=dtype)
    data, dtype = _ensure_np_dtype(data, dtype)
    return data
@@ -483,7 +483,7 @@ def pandas_transform_data(data: DataFrame) -> List[np.ndarray]:
    if is_pd_cat_dtype(ser.dtype):
        return _ensure_np_dtype(
            ser.cat.codes.astype(np.float32)
-            .replace(-1.0, np.NaN)
+            .replace(-1.0, np.nan)
            .to_numpy(na_value=np.nan),
            np.float32,
        )[0]
@@ -495,7 +495,7 @@ def pandas_transform_data(data: DataFrame) -> List[np.ndarray]:
            .combine_chunks()
            .dictionary_encode()
            .indices.astype(np.float32)
-            .replace(-1.0, np.NaN)
+            .replace(-1.0, np.nan)
        )

    def nu_type(ser: pd.Series) -> np.ndarray:

View File

@@ -437,7 +437,7 @@ def make_categorical(
            index = rng.randint(
                low=0, high=n_samples - 1, size=int(n_samples * sparsity)
            )
-            df.iloc[index, i] = np.NaN
+            df.iloc[index, i] = np.nan
        if is_categorical_dtype(df.dtypes[i]):
            assert n_categories == np.unique(df.dtypes[i].categories).size

View File

@@ -66,7 +66,7 @@ def check_uneven_nan(client: Client, tree_method: str, n_workers: int) -> None:
    X = pd.DataFrame({"a": range(10000), "b": range(10000, 0, -1)})
    y = pd.Series([*[0] * 5000, *[1] * 5000])
-    X["a"][:3000:1000] = np.NaN
+    X["a"][:3000:1000] = np.nan
    client.wait_for_workers(n_workers=n_workers)