Require isort on all Python files. (#8420)

This commit is contained in:
Jiaming Yuan
2022-11-08 12:59:06 +08:00
committed by GitHub
parent bf8de227a9
commit 0d3da9869c
69 changed files with 290 additions and 187 deletions

View File

@@ -1,13 +1,14 @@
"""Setup xgboost package."""
import logging
import os
import shutil
import subprocess
import logging
from typing import Optional, List
import sys
from platform import system
from setuptools import setup, find_packages, Extension
from setuptools.command import build_ext, sdist, install_lib, install
from typing import List, Optional
from setuptools import Extension, find_packages, setup
from setuptools.command import build_ext, install, install_lib, sdist
# You can't use `pip install .` as pip copies setup.py to a temporary
# directory, parent directory is no longer reachable (isolated build).

View File

@@ -6,17 +6,28 @@
"""
from abc import ABC
import collections
import os
import pickle
from typing import Callable, List, Optional, Union, Dict, Tuple, TypeVar, cast, Sequence, Any
from abc import ABC
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import numpy
from . import collective
from .core import Booster, DMatrix, XGBoostError, _get_booster_layer_trees
__all__ = [
"TrainingCallback",
"LearningRateScheduler",

View File

@@ -4,12 +4,12 @@ import json
import logging
import pickle
from enum import IntEnum, unique
from typing import Any, List, Dict
from typing import Any, Dict, List
import numpy as np
from ._typing import _T
from .core import _LIB, _check_call, c_str, py_str, from_pystr_to_cstr
from .core import _LIB, _check_call, c_str, from_pystr_to_cstr, py_str
LOGGER = logging.getLogger("[xgboost.collective]")

View File

@@ -282,7 +282,7 @@ def _has_categorical(booster: "Booster", data: DataType) -> bool:
"""Check whether the booster and input data for prediction contain categorical data.
"""
from .data import _is_pandas_df, _is_cudf_df
from .data import _is_cudf_df, _is_pandas_df
if _is_pandas_df(data) or _is_cudf_df(data):
ft = booster.feature_types
if ft is None:
@@ -355,8 +355,7 @@ def ctypes2cupy(cptr: CNumericPtr, length: int, dtype: Type[np.number]) -> CupyT
"""Convert a ctypes pointer array to a cupy array."""
# pylint: disable=import-error
import cupy
from cupy.cuda.memory import MemoryPointer
from cupy.cuda.memory import UnownedMemory
from cupy.cuda.memory import MemoryPointer, UnownedMemory
CUPY_TO_CTYPES_MAPPING: Dict[Type[np.number], Type[CNumeric]] = {
cupy.float32: ctypes.c_float,
@@ -512,8 +511,7 @@ class DataIter(ABC): # pylint: disable=too-many-instance-attributes
feature_types: Optional[FeatureTypes] = None,
**kwargs: Any,
) -> None:
from .data import dispatch_proxy_set_data
from .data import _proxy_transform
from .data import _proxy_transform, dispatch_proxy_set_data
new, cat_codes, feature_names, feature_types = _proxy_transform(
data,
@@ -732,7 +730,7 @@ class DMatrix: # pylint: disable=too-many-instance-attributes,too-many-public-m
self.handle: Optional[ctypes.c_void_p] = None
return
from .data import dispatch_data_backend, _is_iter
from .data import _is_iter, dispatch_data_backend
if _is_iter(data):
self._init_from_iter(data, enable_categorical)
@@ -1406,10 +1404,10 @@ class QuantileDMatrix(DMatrix):
**meta: Any,
) -> None:
from .data import (
_is_dlpack,
_transform_dlpack,
_is_iter,
SingleBatchInternalIter,
_is_dlpack,
_is_iter,
_transform_dlpack,
)
if _is_dlpack(data):

View File

@@ -278,10 +278,7 @@ def _pandas_feature_info(
enable_categorical: bool,
) -> Tuple[Optional[FeatureNames], Optional[FeatureTypes]]:
import pandas as pd
from pandas.api.types import (
is_sparse,
is_categorical_dtype,
)
from pandas.api.types import is_categorical_dtype, is_sparse
# handle feature names
if feature_names is None and meta is None:
@@ -308,10 +305,10 @@ def _pandas_feature_info(
def is_nullable_dtype(dtype: PandasDType) -> bool:
"""Wether dtype is a pandas nullable type."""
from pandas.api.types import (
is_integer_dtype,
is_bool_dtype,
is_float_dtype,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
)
# dtype: pd.core.arrays.numeric.NumericDtype
@@ -325,6 +322,7 @@ def is_nullable_dtype(dtype: PandasDType) -> bool:
def _pandas_cat_null(data: DataFrame) -> DataFrame:
from pandas.api.types import is_categorical_dtype
# handle category codes and nullable.
cat_columns = [
col
@@ -363,10 +361,7 @@ def _transform_pandas_df(
meta: Optional[str] = None,
meta_type: Optional[NumpyDType] = None,
) -> Tuple[np.ndarray, Optional[FeatureNames], Optional[FeatureTypes]]:
from pandas.api.types import (
is_sparse,
is_categorical_dtype,
)
from pandas.api.types import is_categorical_dtype, is_sparse
if not all(
dtype.name in _pandas_dtype_mapper
@@ -533,8 +528,9 @@ def _from_dt_df(
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import \
frame_column_data_r # pylint: disable=no-name-in-module
from datatable.internal import (
frame_column_data_r, # pylint: disable=no-name-in-module
)
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)

View File

@@ -3,8 +3,8 @@
import os
import platform
from typing import List
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):

View File

@@ -2,9 +2,9 @@
# pylint: disable=too-many-branches
# coding: utf-8
"""Plotting Library."""
from io import BytesIO
import json
from typing import Optional, Any
from io import BytesIO
from typing import Any, Optional
import numpy as np
@@ -269,8 +269,8 @@ def plot_tree(
"""
try:
from matplotlib import pyplot as plt
from matplotlib import image
from matplotlib import pyplot as plt
except ImportError as e:
raise ImportError('You must install matplotlib to plot tree') from e

View File

@@ -2,7 +2,7 @@
import logging
import warnings
from enum import IntEnum, unique
from typing import Any, TypeVar, Callable, Optional, List
from typing import Any, Callable, List, Optional, TypeVar
import numpy as np

View File

@@ -10,7 +10,6 @@ import os
import platform
import socket
import sys
import urllib
import zipfile
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
@@ -29,6 +28,7 @@ from typing import (
TypedDict,
Union,
)
from urllib import request
import numpy as np
import pytest
@@ -439,7 +439,7 @@ def get_mq2008(
src = "https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.zip"
target = dpath + "/MQ2008.zip"
if not os.path.exists(target):
urllib.request.urlretrieve(url=src, filename=target)
request.urlretrieve(url=src, filename=target)
with zipfile.ZipFile(target, "r") as f:
f.extractall(path=dpath)

View File

@@ -3,14 +3,13 @@
This script is a variant of dmlc-core/dmlc_tracker/tracker.py,
which is a specialized version for xgboost tasks.
"""
import argparse
import logging
import socket
import struct
import logging
from threading import Thread
import argparse
import sys
from typing import Dict, List, Tuple, Union, Optional, Set
from threading import Thread
from typing import Dict, List, Optional, Set, Tuple, Union
_RingMap = Dict[int, Tuple[int, int]]
_TreeMap = Dict[int, List[int]]

View File

@@ -5,15 +5,26 @@
import copy
import os
import warnings
from typing import Optional, Dict, Any, Union, Tuple, Sequence, List, cast, Iterable
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
from .callback import TrainingCallback, CallbackContainer, EvaluationMonitor, EarlyStopping
from .core import Booster, DMatrix, XGBoostError, _deprecate_positional_args
from .core import Metric, Objective
from .compat import SKLEARN_INSTALLED, XGBStratifiedKFold, DataFrame
from ._typing import Callable, FPreProcCallable, BoosterParam
from ._typing import BoosterParam, Callable, FPreProcCallable
from .callback import (
CallbackContainer,
EarlyStopping,
EvaluationMonitor,
TrainingCallback,
)
from .compat import SKLEARN_INSTALLED, DataFrame, XGBStratifiedKFold
from .core import (
Booster,
DMatrix,
Metric,
Objective,
XGBoostError,
_deprecate_positional_args,
)
_CVFolds = Sequence["CVPack"]