[Draft] Llm on (#2165)
* Add TaskType Explanation

Signed-off-by: Bepitic <[email protected]>

* Add llm model

Signed-off-by: Bepitic <[email protected]>

* add ollama

Signed-off-by: Bepitic <[email protected]>

* better description for descr in title

Signed-off-by: Bepitic <[email protected]>

* Add LLM text to the ImageResult visualization

Signed-off-by: Bepitic <[email protected]>

* latest changes

Signed-off-by: Bepitic <[email protected]>

* add wip llava/llava_next

Signed-off-by: Bepitic <[email protected]>

* add init

Signed-off-by: Bepitic <[email protected]>

* Add LLM text to the ImageResult visualization

Signed-off-by: Bepitic <[email protected]>

* latest changes

Signed-off-by: Bepitic <[email protected]>

* Update lint

Signed-off-by: Bepitic <[email protected]>

* fix visualization with description

Signed-off-by: Bepitic <[email protected]>

* show the images every batch

Signed-off-by: Bepitic <[email protected]>

* fix docstring and error management

Signed-off-by: Bepitic <[email protected]>

* Add compatibility for TaskType.EXPLANATION.

Signed-off-by: Bepitic <[email protected]>

* Remove; show in the engine visualization.

* Fix visualization and LLM OpenAI multi-shot.

* Fix circular import problem

* Add HuggingFace to LlavaNext

Signed-off-by: Bepitic <[email protected]>

---------

Signed-off-by: Bepitic <[email protected]>
Bepitic authored Oct 2, 2024
1 parent 191e21f commit 21287ee
Showing 65 changed files with 9,987 additions and 37 deletions.
1 change: 1 addition & 0 deletions src/anomalib/__init__.py
@@ -22,3 +22,4 @@ class TaskType(str, Enum):
     CLASSIFICATION = "classification"
     DETECTION = "detection"
     SEGMENTATION = "segmentation"
+    EXPLANATION = "explanation"
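For reference, a minimal sketch (assuming this branch is installed) of how the new enum member behaves; TaskType is a str-valued Enum, so the member compares equal to its plain string form:

from anomalib import TaskType

task = TaskType("explanation")
assert task is TaskType.EXPLANATION
assert task == "explanation"  # str-valued enum compares equal to its string form
print(task.value)             # explanation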
4 changes: 2 additions & 2 deletions src/anomalib/callbacks/metrics.py
@@ -75,10 +75,10 @@ def setup(
         pixel_metric_names: list[str] | dict[str, dict[str, Any]]
         if self.pixel_metric_names is None:
             pixel_metric_names = []
-        elif self.task == TaskType.CLASSIFICATION:
+        elif self.task in (TaskType.CLASSIFICATION, TaskType.EXPLANATION):
             pixel_metric_names = []
             logger.warning(
-                "Cannot perform pixel-level evaluation when task type is classification. "
+                "Cannot perform pixel-level evaluation when task type is classification or explanation. "
                 "Ignoring the following pixel-level metrics: %s",
                 self.pixel_metric_names,
             )
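A hypothetical standalone illustration of the guard added above (this helper is not part of the anomalib API): pixel-level metrics only make sense when ground-truth masks or boxes exist, so classification and explanation tasks resolve to an empty metric list.

from anomalib import TaskType

def resolve_pixel_metrics(task: TaskType, requested: list[str] | None) -> list[str]:
    """Illustrative helper: decide which pixel-level metrics to instantiate for a task."""
    if requested is None:
        return []
    if task in (TaskType.CLASSIFICATION, TaskType.EXPLANATION):
        # No masks or boxes are available, so pixel-level evaluation is skipped.
        return []
    return list(requested)

print(resolve_pixel_metrics(TaskType.EXPLANATION, ["AUROC", "F1Score"]))   # []
print(resolve_pixel_metrics(TaskType.SEGMENTATION, ["AUROC", "F1Score"]))  # ['AUROC', 'F1Score']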
3 changes: 2 additions & 1 deletion src/anomalib/data/base/dataset.py
@@ -23,6 +23,7 @@
 _EXPECTED_COLUMNS_SEGMENTATION = [*_EXPECTED_COLUMNS_CLASSIFICATION, "mask_path"]
 _EXPECTED_COLUMNS_PERTASK = {
     "classification": _EXPECTED_COLUMNS_CLASSIFICATION,
+    "explanation": _EXPECTED_COLUMNS_CLASSIFICATION,
     "segmentation": _EXPECTED_COLUMNS_SEGMENTATION,
     "detection": _EXPECTED_COLUMNS_SEGMENTATION,
 }
@@ -169,7 +170,7 @@ def __getitem__(self, index: int) -> dict[str, str | torch.Tensor]:
         image = read_image(image_path, as_tensor=True)
         item = {"image_path": image_path, "label": label_index}

-        if self.task == TaskType.CLASSIFICATION:
+        if self.task in (TaskType.CLASSIFICATION, TaskType.EXPLANATION):
             item["image"] = self.transform(image) if self.transform else image
         elif self.task in {TaskType.DETECTION, TaskType.SEGMENTATION}:
             # Only Anomalous (1) images have masks in anomaly datasets
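As a usage sketch (assuming a local copy of MVTec AD and the v1-style datamodule API on this branch), an EXPLANATION-task dataset yields the same item layout as a classification one, i.e. no mask key:

from anomalib import TaskType
from anomalib.data import MVTec

# Hypothetical local dataset path; adjust to wherever MVTec AD is stored.
datamodule = MVTec(root="./datasets/MVTec", category="bottle", task=TaskType.EXPLANATION)
datamodule.setup()

item = datamodule.test_data[0]
print(sorted(item.keys()))  # expected: ['image', 'image_path', 'label']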
2 changes: 1 addition & 1 deletion src/anomalib/data/base/depth.py
@@ -48,7 +48,7 @@ def __getitem__(self, index: int) -> dict[str, str | torch.Tensor]:
         depth_image = to_tensor(read_depth_image(depth_path))
         item = {"image_path": image_path, "depth_path": depth_path, "label": label_index}

-        if self.task == TaskType.CLASSIFICATION:
+        if self.task in (TaskType.CLASSIFICATION, TaskType.EXPLANATION):
             item["image"], item["depth_image"] = (
                 self.transform(image, depth_image) if self.transform else (image, depth_image)
             )
2 changes: 1 addition & 1 deletion src/anomalib/deploy/inferencers/openvino_inferencer.py
@@ -278,7 +278,7 @@ def post_process(self, predictions: np.ndarray, metadata: dict | DictConfig | None
         pred_idx = pred_score >= metadata["image_threshold"]
         pred_label = LabelName.ABNORMAL if pred_idx else LabelName.NORMAL

-        if task == TaskType.CLASSIFICATION:
+        if task in (TaskType.CLASSIFICATION, TaskType.EXPLANATION):
             _, pred_score = self._normalize(pred_scores=pred_score, metadata=metadata)
         elif task in {TaskType.SEGMENTATION, TaskType.DETECTION}:
             if "pixel_threshold" in metadata:
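A hypothetical, simplified rendering of the image-level branch above (not the anomalib API): for classification and explanation tasks only the image score is thresholded and normalized; no pixel map is post-processed.

def image_level_label(pred_score: float, image_threshold: float) -> str:
    """Illustrative helper: threshold the image-level anomaly score."""
    return "abnormal" if pred_score >= image_threshold else "normal"

print(image_level_label(0.82, image_threshold=0.5))  # abnormal
print(image_level_label(0.31, image_threshold=0.5))  # normal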
8 changes: 8 additions & 0 deletions src/anomalib/models/__init__.py
@@ -24,6 +24,10 @@
     Fastflow,
     Fre,
     Ganomaly,
+    Llava,
+    Llavanext,
+    Llm,
+    Llmollama,
     Padim,
     Patchcore,
     ReverseDistillation,
@@ -59,6 +63,10 @@ class UnknownModelError(ModuleNotFoundError):
     "Uflow",
     "AiVad",
     "WinClip",
+    "Llm",
+    "Llmollama",
+    "Llava",
+    "Llavanext",
 ]

 logger = logging.getLogger(__name__)
8 changes: 8 additions & 0 deletions src/anomalib/models/image/__init__.py
@@ -14,6 +14,10 @@
 from .fastflow import Fastflow
 from .fre import Fre
 from .ganomaly import Ganomaly
+from .llava import Llava
+from .llava_next import Llavanext
+from .llm import Llm
+from .llm_ollama import Llmollama
 from .padim import Padim
 from .patchcore import Patchcore
 from .reverse_distillation import ReverseDistillation
@@ -41,4 +45,8 @@
     "Stfpm",
     "Uflow",
     "WinClip",
+    "Llm",
+    "Llmollama",
+    "Llava",
+    "Llavanext",
 ]
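With both export lists updated, the new draft model classes become importable from the top-level models package. A quick smoke test, assuming this branch plus its optional LLM/vision-language dependencies are installed (importing anomalib.models pulls in every model module):

from anomalib.models import Llava, Llavanext, Llm, Llmollama

# The draft classes are exported alongside the existing models; instantiation may
# require extra backends (OpenAI, Ollama, HuggingFace) and is not shown here.
print([cls.__name__ for cls in (Llm, Llmollama, Llava, Llavanext)])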
8 changes: 8 additions & 0 deletions src/anomalib/models/image/llava/__init__.py
@@ -0,0 +1,8 @@
+"""Llm model."""
+
+# Copyright (C) 2023-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .lightning_model import Llava
+
+__all__ = ["Llava"]
13 changes: 13 additions & 0 deletions src/anomalib/models/image/llava/constants.py
@@ -0,0 +1,13 @@
+CONTROLLER_HEART_BEAT_EXPIRATION = 30
+WORKER_HEART_BEAT_INTERVAL = 15
+
+LOGDIR = "."
+
+# Model Constants
+IGNORE_INDEX = -100
+IMAGE_TOKEN_INDEX = -200
+DEFAULT_IMAGE_TOKEN = "<image>"
+DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+DEFAULT_IM_START_TOKEN = "<im_start>"
+DEFAULT_IM_END_TOKEN = "<im_end>"
+IMAGE_PLACEHOLDER = "<image-placeholder>"
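These constants mirror the upstream LLaVA conventions. As a hedged sketch of how such tokens are typically used (the exact prompt format of the new model is not shown in this diff), a prompt places the image token where the projected vision features are later spliced in; the values below are copied from the file above so the snippet runs without the optional LLaVA dependencies:

# Values mirrored from constants.py added above (hypothetical usage sketch).
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"

question = "Is there an anomaly in this image? Describe it briefly."
# Typical LLaVA-style prompt: the <image> token marks where vision features go.
prompt = f"{DEFAULT_IM_START_TOKEN}{DEFAULT_IMAGE_TOKEN}{DEFAULT_IM_END_TOKEN}\n{question}"
print(prompt)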
(Diff truncated; the remaining changed files are not shown.)
