chore: remove Python 3.8 support (#15)
jkanche authored Dec 20, 2024
1 parent c2c04da commit 4ce41f3
Showing 9 changed files with 103 additions and 121 deletions.
75 changes: 38 additions & 37 deletions .github/workflows/pypi-publish.yml
@@ -9,43 +9,44 @@ on:

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.9
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install flake8 pytest tox
-      # - name: Lint with flake8
-      #   run: |
-      #     # stop the build if there are Python syntax errors or undefined names
-      #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-      #     # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-      #     # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-      - name: Test with tox
-        run: |
-          tox
-      - name: Build docs
-        run: |
-          tox -e docs
-      - run: touch ./docs/_build/html/.nojekyll
-      - name: GH Pages Deployment
-        uses: JamesIves/github-pages-deploy-action@4.1.3
-        with:
-          branch: gh-pages # The branch the action should deploy to.
-          folder: ./docs/_build/html
-          clean: true # Automatically remove deleted files from the deploy branch
-      - name: Build Project and Publish
-        run: |
-          python -m tox -e clean,build
-      - name: Publish package
-        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_PASSWORD }}
+      - uses: actions/checkout@v4
+
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.11
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install tox
+      - name: Test with tox
+        run: |
+          tox
+      - name: Build docs
+        run: |
+          tox -e docs
+      - run: touch ./docs/_build/html/.nojekyll
+
+      - name: GH Pages Deployment
+        uses: JamesIves/github-pages-deploy-action@v4
+        with:
+          branch: gh-pages # The branch the action should deploy to.
+          folder: ./docs/_build/html
+          clean: true # Automatically remove deleted files from the deploy branch
+
+      - name: Build Project and Publish
+        run: |
+          python -m tox -e clean,build
+      - name: Publish package
+        uses: pypa/gh-action-pypi-publish@v1.12.2
+        with:
+          user: __token__
+          password: ${{ secrets.PYPI_PASSWORD }}
47 changes: 20 additions & 27 deletions .github/workflows/pypi-test.yml
@@ -1,40 +1,33 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

-name: Test the library
+name: Run tests

on:
  push:
-    branches: [ master ]
+    branches: [master]
  pull_request:
-    branches: [ master ]
+    branches: [master]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
-        python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12' ]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]

    name: Python ${{ matrix.python-version }}
    steps:
-      - uses: actions/checkout@v2
-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-          cache: 'pip'
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install flake8 pytest tox
-      # - name: Lint with flake8
-      #   run: |
-      #     # stop the build if there are Python syntax errors or undefined names
-      #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-      #     # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-      #     # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-      - name: Test with tox
-        run: |
-          tox
+      - uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "pip"
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install tox
+      - name: Test with tox
+        run: |
+          tox
25 changes: 13 additions & 12 deletions .pre-commit-config.yaml
@@ -17,26 +17,27 @@ repos:
      - id: mixed-line-ending
        args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows

-  - repo: https://github.com/PyCQA/docformatter
-    rev: v1.7.5
-    hooks:
-      - id: docformatter
-        additional_dependencies: [tomli]
-        args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
-        # --config, ./pyproject.toml
+  # - repo: https://github.com/PyCQA/docformatter
+  #   rev: master
+  #   hooks:
+  #     - id: docformatter
+  #       additional_dependencies: [tomli]
+  #       args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
+  #       # --config, ./pyproject.toml

-  - repo: https://github.com/psf/black
-    rev: 24.8.0
-    hooks:
-      - id: black
-        language_version: python3
+  # - repo: https://github.com/psf/black
+  #   rev: 24.8.0
+  #   hooks:
+  #     - id: black
+  #       language_version: python3

  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.6.8
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format

## If like to embrace black styles even in the docs:
# - repo: https://github.com/asottile/blacken-docs
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
# Changelog

+## Version 0.3.0
+
+- chore: Remove Python 3.8 (EOL)
+- precommit: Replace docformatter with ruff's formatter
+
## Version 0.2.0

- Compatibility with NumPy 2.0
9 changes: 9 additions & 0 deletions docs/conf.py
@@ -170,6 +170,15 @@ def setup(app):
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True

+autodoc_default_options = {
+    'special-members': True,
+    'undoc-members': False,
+    'exclude-members': '__weakref__, __dict__, __str__, __module__, __init__'
+}
+
+autosummary_generate = True
+autosummary_imported_members = True
+

# -- Options for HTML output -------------------------------------------------

4 changes: 4 additions & 0 deletions pyproject.toml
@@ -17,6 +17,10 @@ extend-ignore = ["F821"]
[tool.ruff.pydocstyle]
convention = "google"

+[tool.ruff.format]
+docstring-code-format = true
+docstring-code-line-length = 20
+
[tool.ruff.per-file-ignores]
"__init__.py" = ["E402", "F401"]

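For context on the new [tool.ruff.format] table: with docstring-code-format set to true, Ruff's formatter (added above as the ruff-format pre-commit hook) also reformats code embedded in docstrings, wrapping it to docstring-code-line-length instead of the regular line length. A minimal sketch of the effect, assuming a hypothetical scale function that is not part of this repository — with a 20-character docstring code line length, the doctest call ends up wrapped roughly like this:

def scale(values, factor=2):
    """Multiply each value by a factor.

    Example:

        >>> scale(
        ...     [1, 2, 3],
        ...     factor=10,
        ... )
        [10, 20, 30]
    """
    return [v * factor for v in values]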
2 changes: 1 addition & 1 deletion setup.cfg
@@ -41,7 +41,7 @@ package_dir =
    =src

# Require a min/specific Python version (comma-separated conditions)
-python_requires = >=3.8
+python_requires = >=3.9

# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in
53 changes: 12 additions & 41 deletions src/hdf5array/Hdf5CompressedSparseMatrixSeed.py
@@ -98,22 +98,14 @@ def __init__(

        with File(self._path, "r") as handle:
            self._indptr = handle[self._indptr_name][:]
-            if len(self._indptr.shape) != 1 or not issubdtype(
-                self._indptr.dtype, integer
-            ):
-                raise ValueError(
-                    "'indptr' dataset should be 1-dimensional and contain integers"
-                )
+            if len(self._indptr.shape) != 1 or not issubdtype(self._indptr.dtype, integer):
+                raise ValueError("'indptr' dataset should be 1-dimensional and contain integers")
            if by_column:
                if len(self._indptr) != shape[1] + 1:
-                    raise ValueError(
-                        "'indptr' dataset should have length equal to the number of columns + 1"
-                    )
+                    raise ValueError("'indptr' dataset should have length equal to the number of columns + 1")
            else:
                if len(self._indptr) != shape[0] + 1:
-                    raise ValueError(
-                        "'indptr' dataset should have length equal to the number of columns + 1"
-                    )
+                    raise ValueError("'indptr' dataset should have length equal to the number of columns + 1")
            if self._indptr[0] != 0:
                raise ValueError("first entry of 'indptr' dataset should be zero")
            for i in range(1, len(self._indptr)):
@@ -122,9 +114,7 @@ def __init__(

            ddset = handle[self._data_name]
            if len(ddset.shape) != 1 or ddset.shape[0] != self._indptr[-1]:
-                raise ValueError(
-                    "'data' dataset should have length equal to the number of non-zero elements"
-                )
+                raise ValueError("'data' dataset should have length equal to the number of non-zero elements")
            self._modify_dtype = dtype is not None and dtype != ddset.dtype
            if not self._modify_dtype:
                dtype = ddset.dtype
@@ -133,14 +123,10 @@ def __init__(
            # Not going to check for consistency of the indices themselves.
            idset = handle[self._indices_name]
            if len(idset.shape) != 1 or idset.shape[0] != self._indptr[-1]:
-                raise ValueError(
-                    "'indices' dataset should have length equal to the number of non-zero elements"
-                )
+                raise ValueError("'indices' dataset should have length equal to the number of non-zero elements")
            if not issubdtype(idset.dtype, integer):
                raise ValueError("'indices' dataset should contain integers")
-            self._modify_index_dtype = (
-                index_dtype is not None and index_dtype != idset.dtype
-            )
+            self._modify_index_dtype = index_dtype is not None and index_dtype != idset.dtype
            if not self._modify_index_dtype:
                index_dtype = idset.dtype
            self._index_dtype = index_dtype
@@ -269,9 +255,7 @@ def _extract_array(
            start_idx = bisect_left(curindices, secondary_start)
            end_idx = len(curindices)
            if search_end:
-                end_idx = bisect_left(
-                    curindices, secondary_end, lo=start_idx, hi=end_idx
-                )
+                end_idx = bisect_left(curindices, secondary_end, lo=start_idx, hi=end_idx)

            if is_consecutive:
                mod_indices = curindices[start_idx:end_idx]
@@ -406,14 +390,7 @@ def _consecutive(r, cols, values):
class Hdf5CompressedSparseMatrix(DelayedArray):
    """Compressed sparse matrix in a HDF5 file as a ``DelayedArray``."""

-    def __init__(
-        self,
-        path: str,
-        group_name: Optional[str],
-        shape: Tuple[int, int],
-        by_column: bool,
-        **kwargs
-    ):
+    def __init__(self, path: str, group_name: Optional[str], shape: Tuple[int, int], by_column: bool, **kwargs):
        """To construct a ``Hdf5CompressedSparseMatrix`` from an existing :py:class:`~Hdf5CompressedSparseMatrixSeed`,
        use :py:meth:`~delayedarray.wrap.wrap` instead.
@@ -439,9 +416,7 @@ def __init__(
        if isinstance(path, Hdf5CompressedSparseMatrixSeed):
            seed = path
        else:
-            seed = Hdf5CompressedSparseMatrixSeed(
-                path, group_name, shape, by_column, **kwargs
-            )
+            seed = Hdf5CompressedSparseMatrixSeed(path, group_name, shape, by_column, **kwargs)
        super(Hdf5CompressedSparseMatrix, self).__init__(seed)

    @property
@@ -530,13 +505,9 @@ def to_scipy_sparse_matrix_from_Hdf5CompressedSparseMatrix(
        _indptr = handle[x.indptr_name][:]

        if x.by_column:
-            _matrix = scipy.sparse.csc_matrix(
-                (_data, _indices, _indptr), shape=x.shape, dtype=x.dtype
-            )
+            _matrix = scipy.sparse.csc_matrix((_data, _indices, _indptr), shape=x.shape, dtype=x.dtype)
        else:
-            _matrix = scipy.sparse.csr_matrix(
-                (_data, _indices, _indptr), shape=x.shape, dtype=x.dtype
-            )
+            _matrix = scipy.sparse.csr_matrix((_data, _indices, _indptr), shape=x.shape, dtype=x.dtype)

    if format == "csc":
        return _matrix.tocsc()
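The edits to this file are formatting-only (ruff-format collapses wrapped statements onto single lines), so behaviour is unchanged. For orientation, a minimal usage sketch of the class and converter touched here — the file name, group and dataset names, and the delayedarray.to_scipy_sparse_matrix entry point are assumptions inferred from the signatures visible in this diff, not something this commit adds:

import numpy
import h5py
from scipy import sparse

import delayedarray
from hdf5array import Hdf5CompressedSparseMatrix

# Write a small CSC matrix using the data/indices/indptr layout that
# Hdf5CompressedSparseMatrixSeed.__init__ validates above.
# (File, group and dataset names are hypothetical for this sketch.)
ref = sparse.random(100, 20, density=0.1, format="csc")
with h5py.File("matrix.h5", "w") as handle:
    grp = handle.create_group("matrix")
    grp.create_dataset("data", data=ref.data)
    grp.create_dataset("indices", data=ref.indices)
    grp.create_dataset("indptr", data=ref.indptr)

# Wrap the on-disk matrix as a DelayedArray; by_column=True marks CSC layout.
mat = Hdf5CompressedSparseMatrix("matrix.h5", "matrix", shape=(100, 20), by_column=True)

# Realize it back into scipy via the registered converter shown above.
restored = delayedarray.to_scipy_sparse_matrix(mat, "csc")
assert numpy.allclose(restored.toarray(), ref.toarray())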
4 changes: 1 addition & 3 deletions src/hdf5array/Hdf5DenseArraySeed.py
@@ -122,9 +122,7 @@ def chunk_grid_Hdf5DenseArraySeed(x: Hdf5DenseArraySeed):


@extract_dense_array.register
-def extract_dense_array_Hdf5DenseArraySeed(
-    x: Hdf5DenseArraySeed, subset: Tuple[Sequence[int], ...]
-) -> numpy.ndarray:
+def extract_dense_array_Hdf5DenseArraySeed(x: Hdf5DenseArraySeed, subset: Tuple[Sequence[int], ...]) -> numpy.ndarray:
    """See :py:meth:`~delayedarray.extract_dense_array.extract_dense_array`."""
    converted = []
    num_lists = 0
