Unverified Commit badf273a authored by Baber Abbasi, committed by GitHub

update pre-commit hooks and git actions (#2497)

* pre-commit update

* update github actions

* make logging less verbose

* fix artifacts
parent e20e1ddc
@@ -16,7 +16,7 @@ jobs:
     name: Scan for changed tasks
     steps:
       - name: checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 2 # OR "2" -> To retrieve the preceding commit.
@@ -47,7 +47,7 @@ jobs:
       - name: Set up Python 3.9
         if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.9
           cache: 'pip'
......
@@ -13,7 +13,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.x"
@@ -26,7 +26,7 @@ jobs:
       - name: Build a binary wheel and a source tarball
         run: python3 -m build
       - name: Store the distribution packages
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
@@ -46,7 +46,7 @@ jobs:
     steps:
       - name: Download all the dists
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
@@ -68,7 +68,7 @@ jobs:
     steps:
       - name: Download all the dists
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
......
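A note on the artifact steps above: artifacts uploaded with actions/upload-artifact@v4 cannot be retrieved with actions/download-artifact@v3 (and vice versa), so both sides of the publish workflow have to move to v4 together. Below is a minimal sketch of the matched pair, reusing the artifact name from this workflow; the workflow, job, and runner names are illustrative assumptions, not taken from the repository.

name: artifact-version-pairing   # illustrative sketch, not the repository's publish workflow
on: workflow_dispatch
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.x"
      - name: Build a binary wheel and a source tarball
        run: python3 -m pip install build && python3 -m build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4          # major version must match the download step below
        with:
          name: python-package-distributions
          path: dist/
  publish:
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4        # v3 cannot read artifacts uploaded by v4
        with:
          name: python-package-distributions
          path: dist/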
@@ -63,9 +63,9 @@ jobs:
       - name: Test with pytest
         run: python -m pytest --showlocals -s -vv -n=auto --ignore=tests/models/test_neuralmagic.py --ignore=tests/models/test_openvino.py
       - name: Archive artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: output_results
+          name: output_testcpu${{ matrix.python-version }}
           path: |
             test_logs/*
   testmodels:
@@ -87,9 +87,3 @@ jobs:
           pip install -e '.[dev,optimum,deepsparse,sparseml,api]' --extra-index-url https://download.pytorch.org/whl/cpu
       - name: Test with pytest
         run: python -m pytest tests/models --showlocals -s -vv
-      - name: Archive artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: output_results
-          path: |
-            test_logs/*
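The renamed artifact and the removal of the second, identically named archive step fit another v4 change: actions/upload-artifact@v4 treats artifacts as immutable, so uploading two artifacts with the same name in one run fails, where v3 would silently merge them. Matrix jobs therefore need per-entry names, which is what the matrix.python-version suffix above provides. A minimal sketch of that pattern follows; the workflow name, job layout, and log contents are illustrative assumptions.

name: matrix-artifact-names   # illustrative sketch only
on: workflow_dispatch
jobs:
  testcpu:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11"]
    steps:
      - name: Produce some logs
        run: mkdir -p test_logs && echo "py${{ matrix.python-version }}" > test_logs/run.log
      - name: Archive artifacts
        uses: actions/upload-artifact@v4
        with:
          # unique per matrix entry; reusing a single name such as output_results
          # across matrix jobs would make upload-artifact@v4 fail with a name conflict
          name: output_testcpu${{ matrix.python-version }}
          path: |
            test_logs/*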
@@ -2,7 +2,7 @@
 exclude: ^tests/testdata/
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: check-added-large-files
       - id: check-ast
@@ -29,7 +29,7 @@ repos:
       - id: mixed-line-ending
         args: [--fix=lf]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.8
+    rev: v0.7.4
     hooks:
       # Run the linter.
       - id: ruff
......
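For reference, revision bumps like the ones above are usually generated rather than edited by hand: running pre-commit autoupdate rewrites the rev field of every hook repository in .pre-commit-config.yaml to its latest tag. A trimmed excerpt of the resulting config is shown below; the hook lists are shortened for illustration.

exclude: ^tests/testdata/
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0   # bumped from v4.6.0 by pre-commit autoupdate
    hooks:
      - id: check-added-large-files
      - id: check-ast
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.4   # bumped from v0.6.8
    hooks:
      - id: ruff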
@@ -464,7 +464,7 @@ class HFLM(TemplateLM):
         elif backend == "seq2seq":
             self.backend = backend
             eval_logger.info(
-                f"Overrode HF model backend type, and using type '{backend}'"
+                f"Overrode HF model backend type, and using type '{self.backend}'"
             )
         else:
             # determine and use the default HF backend for this model, based on its config + metadata.
@@ -476,12 +476,12 @@ class HFLM(TemplateLM):
                 # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
                 # these special cases should be treated as seq2seq models.
                 self.backend = "seq2seq"
-                eval_logger.info(f"Using model type '{backend}'")
+                eval_logger.debug(f"Using model type '{self.backend}'")
             elif (
                 getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
             ):
                 self.backend = "causal"
-                eval_logger.info(f"Using model type '{backend}'")
+                eval_logger.debug(f"Using model type '{self.backend}'")
             else:
                 if not trust_remote_code:
                     eval_logger.warning(
@@ -493,7 +493,7 @@ class HFLM(TemplateLM):
                 # then we default to assuming AutoModelForCausalLM
                 self.backend = "causal"
                 eval_logger.info(
-                    f"Model type cannot be determined. Using default model type '{backend}'"
+                    f"Model type cannot be determined. Using default model type '{self.backend}'"
                 )
         if self.AUTO_MODEL_CLASS is None:
......