2 commits  89af977cc7 ... 8aabc35670

Author             SHA1        Message     Committed
Kseniya Belousova  8aabc35670  new commit  1 month ago
Kseniya Belousova  e4a6ca4a3d  New commit  1 month ago

+ 1 - 0
.gitignore

@@ -2,3 +2,4 @@
 /.idea/
 .idea
 .idea/*
+.venv/

+ 5 - 0
requirements.in

@@ -0,0 +1,5 @@
+PyQt5
+numpy
+opencv-python
+pydicom
+scikit-image

+ 26 - 0
requirements.lock.txt

@@ -0,0 +1,26 @@
+contourpy==1.3.3
+cycler==0.12.1
+fonttools==4.59.1
+imageio==2.37.0
+kiwisolver==1.4.9
+-e git+https://git.physics.itmo.ru/nikita.babich/knee_seg@89af977cc7c935de5f442b0aa99268ccde51729e#egg=KneeSeg
+lazy_loader==0.4
+matplotlib==3.10.5
+networkx==3.5
+numpy==2.2.6
+opencv-python==4.12.0.88
+packaging==25.0
+pillow==11.3.0
+pydicom==3.0.1
+pyparsing==3.2.3
+PyQt5==5.15.11
+PyQt5-Qt5==5.15.2
+PyQt5_sip==12.17.0
+python-dateutil==2.9.0.post0
+scikit-image==0.25.2
+scipy==1.16.1
+setuptools==80.9.0
+six==1.17.0
+tifffile==2025.6.11
+wheel==0.45.1
+yattag==1.16.1
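
Note: requirements.lock.txt looks like pip-compile output (pip-tools is listed under the dev extra below), so it can presumably be regenerated with "pip-compile requirements.in -o requirements.lock.txt". A minimal sketch for sanity-checking the active environment against these pins; the lock-file name and the "==" pin format come from this diff, the rest is illustrative:

from importlib.metadata import PackageNotFoundError, version
from pathlib import Path

def check_lock(lock_path: str = "requirements.lock.txt") -> None:
    """Report packages whose installed version differs from the '==' pin."""
    for raw in Path(lock_path).read_text().splitlines():
        line = raw.strip()
        # Skip blanks, comments, editable installs (-e ...) and unpinned lines.
        if not line or line.startswith(("#", "-e")) or "==" not in line:
            continue
        name, pinned = line.split("==", 1)
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed (pinned {pinned})")
            continue
        if installed != pinned:
            print(f"{name}: installed {installed}, pinned {pinned}")

if __name__ == "__main__":
    check_lock()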

+ 19 - 0
src/KneeSeg.egg-info/PKG-INFO

@@ -0,0 +1,19 @@
+Metadata-Version: 2.4
+Name: KneeSeg
+Version: 0.1.0
+Summary: Knee segmentation application with DICOM tooling
+Author-email: Nikita Babich <nikita.babich@metalab.ifmo.ru>
+Requires-Python: <3.13,>=3.12
+Requires-Dist: numpy>=1.26
+Requires-Dist: pydicom>=2.4
+Requires-Dist: PyQt5>=5.15
+Requires-Dist: opencv-python>=4.8
+Requires-Dist: scikit-image>=0.22
+Requires-Dist: matplotlib>=3.8
+Requires-Dist: pillow>=10.0
+Requires-Dist: yattag>=1.15
+Provides-Extra: dev
+Requires-Dist: pip-tools>=7.4; extra == "dev"
+Requires-Dist: pytest>=8.0; extra == "dev"
+Requires-Dist: ruff>=0.5; extra == "dev"
+Requires-Dist: mypy>=1.10; extra == "dev"
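
Note: because the lock file installs the project itself in editable mode (the -e git+...#egg=KneeSeg line above), this metadata is also queryable at runtime. A small sketch, assuming the distribution is installed under the name KneeSeg:

from importlib.metadata import metadata, requires

md = metadata("KneeSeg")
print(md["Name"], md["Version"], md["Requires-Python"])   # mirrors the PKG-INFO above

# Declared dependencies (Requires-Dist), including the [dev] extra markers.
for req in requires("KneeSeg") or []:
    print(req)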

+ 7 - 0
src/KneeSeg.egg-info/SOURCES.txt

@@ -0,0 +1,7 @@
+README.md
+pyproject.toml
+src/KneeSeg.egg-info/PKG-INFO
+src/KneeSeg.egg-info/SOURCES.txt
+src/KneeSeg.egg-info/dependency_links.txt
+src/KneeSeg.egg-info/requires.txt
+src/KneeSeg.egg-info/top_level.txt

+ 1 - 0
src/KneeSeg.egg-info/dependency_links.txt

@@ -0,0 +1 @@
+

+ 14 - 0
src/KneeSeg.egg-info/requires.txt

@@ -0,0 +1,14 @@
+numpy>=1.26
+pydicom>=2.4
+PyQt5>=5.15
+opencv-python>=4.8
+scikit-image>=0.22
+matplotlib>=3.8
+pillow>=10.0
+yattag>=1.15
+
+[dev]
+pip-tools>=7.4
+pytest>=8.0
+ruff>=0.5
+mypy>=1.10

+ 1 - 0
src/KneeSeg.egg-info/top_level.txt

@@ -0,0 +1 @@
+

Binary
src/knee/__pycache__/__init__.cpython-312.pyc

Binary
src/knee/__pycache__/app.cpython-312.pyc

Binary
src/knee/__pycache__/constants.cpython-312.pyc

Binary
src/knee/__pycache__/image_label.cpython-312.pyc

Binary
src/knee/__pycache__/main_window.cpython-312.pyc

Binary
src/knee/__pycache__/processing.cpython-312.pyc

Binary
src/knee/__pycache__/utils_dicom.cpython-312.pyc

Binary
src/knee/__pycache__/utils_rois.cpython-312.pyc

+ 5 - 2
src/knee/image_label.py

@@ -18,8 +18,9 @@ class ImageLabel(QLabel):
         self.setFocusPolicy(Qt.StrongFocus)
 
         # Core data
-        self.image = QImage()                           # current image (QImage)
-        self._img2d: np.ndarray | None = None           # "clean" 2D frame for computations
+        self.image = QImage()                               # current image (QImage)
+        self._img2d: np.ndarray | None = None               # "clean" 2D frame for computations
+        self._img_not_normalized: np.ndarray | None = None  # image without normalization
 
         # ROI
         self.rois: Dict[int, List[dict]] = {}           # slice_idx -> list of roi dicts
@@ -156,6 +157,8 @@ class ImageLabel(QLabel):
         arr = ds.pixel_array
         img8 = normalize_to_uint8(arr, ds)
         self._img2d = img8.copy()
+        self._img_not_normalized = arr.copy()
+
 
         # prepare a QImage on top of the buffer
         h, w = img8.shape
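
Note: the second copy is kept because display normalization discards the original intensity scale, so thresholds computed on the uint8 image and on the raw DICOM values differ. The real normalize_to_uint8(arr, ds) is not part of this diff; the sketch below is only a hypothetical min-max variant that illustrates the information loss:

import numpy as np

def normalize_to_uint8_sketch(arr: np.ndarray) -> np.ndarray:
    """Hypothetical display-only rescale to 0..255; the project's version may
    apply DICOM windowing/rescale tags instead of a plain min-max stretch."""
    data = arr.astype(np.float64)
    lo, hi = float(data.min()), float(data.max())
    if hi <= lo:
        return np.zeros(arr.shape, dtype=np.uint8)
    return (255.0 * (data - lo) / (hi - lo)).astype(np.uint8)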

+ 26 - 4
src/knee/main_window.py

@@ -8,6 +8,7 @@ from PyQt5.QtCore import Qt, QEvent
 from PyQt5.QtWidgets import QSizePolicy
 from PyQt5.QtCore import QTimer
 import pydicom
+import numpy as np
 
 from .constants import APP_TITLE, MARK_LABEL
 from .image_label import ImageLabel
@@ -31,6 +32,9 @@ class ROIDrawer(QMainWindow):
         self._build_ui()
         self._init_series_filters()
 
+        # Binary segmentation mask for saving
+        self.mask_for_save = None
+
     def _build_ui(self):
         self.setWindowTitle(APP_TITLE)
         self.resize(1600, 900)
@@ -164,6 +168,10 @@ class ROIDrawer(QMainWindow):
         load_rois_btn.clicked.connect(self._load_rois)
         btns.addWidget(load_rois_btn)
 
+        save_mask_btn = QPushButton('Save mask', self)
+        save_mask_btn.clicked.connect(self._save_mask)
+        btns.addWidget(save_mask_btn)
+
         btns.addStretch(1)
         QTimer.singleShot(0, self._rescale_previews_once)
         self.sequence_dropdown.currentIndexChanged.connect(self._on_sequence_changed)
@@ -251,6 +259,15 @@ class ROIDrawer(QMainWindow):
         except Exception as e:
             QMessageBox.critical(self, "Error", str(e))
 
+    def _save_mask(self):
+        path, _ = QFileDialog.getSaveFileName(self, "Save mask", "", "Numpy Files (*.npy);;All Files (*)")
+        if not path:
+            return
+        try:
+            np.save(path, self.mask_for_save)
+        except Exception as e:
+            QMessageBox.critical(self, "Error", str(e))
+
     def _load_rois(self):
         path, _ = QFileDialog.getOpenFileName(self, "Load ROIs", "", "JSON Files (*.json);;All Files (*)")
         if not path:
@@ -308,7 +325,7 @@ class ROIDrawer(QMainWindow):
 
                 rois_for_slice = self.image_label.rois.get(i, [])
                 vol_ml = compute_volume_ml(
-                    img8, rois_for_slice,
+                    img8, arr, rois_for_slice,
                     self.threshold_brightness, self.contours_thr,
                     spacing_xy, z
                 )
@@ -326,23 +343,27 @@ class ROIDrawer(QMainWindow):
     def _grab_gray_np(self):
         return self.image_label._img2d
 
+    def _grab_not_normalized_gray_np(self):
+        return self.image_label._img_not_normalized
+
     def _recompute_views(self):
         """Previews only. Volumes are left alone (they are computed in batch)."""
         arr = self._grab_gray_np()
+        not_normalized_arr = self._grab_not_normalized_gray_np()
         if arr is None:
             return
         rois_for_slice = self.image_label.rois.get(self.image_label.slice_index, [])
 
         # filtration preview
-        pix = apply_filtration(arr, rois_for_slice, self.threshold_brightness)
+        pix = apply_filtration(arr, not_normalized_arr, rois_for_slice, self.threshold_brightness)
         if pix is not None:
             self._last_filtration_pixmap = pix
             self._set_label_pixmap_keep_ar(self.filtration_label, pix)
 
         # segmentation preview (no volume recorded)
         z = self.image_label.slice_thickness if self.use_slice_thickness else self.image_label.spacing_between_slices
-        seg_pix, _vol_ml_preview, _ = apply_segmentation(
-            arr, rois_for_slice, self.threshold_brightness, self.contours_thr,
+        seg_pix, _vol_ml_preview, _, filt_mask = apply_segmentation(
+            arr, not_normalized_arr, rois_for_slice, self.threshold_brightness, self.contours_thr,
             self.image_label.spacing_xy, z,
             volume_by_slice={},  # empty so that nothing gets stored
             slice_index=self.image_label.slice_index
@@ -350,6 +371,7 @@ class ROIDrawer(QMainWindow):
         if seg_pix is not None:
             self._last_segmentation_pixmap = seg_pix
             self._set_label_pixmap_keep_ar(self.segmentation_label, seg_pix)
+        self.mask_for_save = filt_mask
 
     # ---------- Helpers ----------
     def _set_label_pixmap_keep_ar(self, label, pixmap):
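
Note: _save_mask writes whatever _recompute_views last stored in self.mask_for_save, i.e. the filled, area-filtered binary mask of the currently previewed slice; if no preview has run yet it would save None (stored by NumPy as a pickled object array), so a guard may be worth adding. A minimal read-back sketch, assuming the user saved the file as mask.npy:

import numpy as np

mask = np.load("mask.npy")                              # expected shape (H, W), dtype uint8
print(mask.shape, mask.dtype)
print("foreground pixels:", int((mask == 255).sum()))   # mask values are 0 or 255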

+ 20 - 13
src/knee/processing.py

@@ -22,27 +22,28 @@ def qimage_from_bgr(img_bgr: np.ndarray) -> QImage:
     return QImage(rgb.data, w, h, rgb.strides[0], QImage.Format_RGB888)
 
 
-def apply_filtration(src_gray: np.ndarray, rois_for_slice, threshold_brightness: float):
+def apply_filtration(src_gray: np.ndarray, not_norm_gray: np.ndarray, rois_for_slice, threshold_brightness: float):
     if src_gray is None:
         return None
     h, w = src_gray.shape
     mask = build_mask_from_rois(rois_for_slice, (h, w))
 
-    masked = src_gray.copy()
+    #masked = src_gray.copy()
+    masked = not_norm_gray.copy()
     masked[mask != 255] = 0
 
     thr_val = threshold_brightness * float(masked.max()) if masked.max() > 0 else 0.0
     seg = np.zeros_like(src_gray, dtype=np.uint8)
-    seg[(mask == 255) & (src_gray >= thr_val)] = 255
+    seg[(mask == 255) & (not_norm_gray >= thr_val)] = 255
 
-    contours, _ = cv2.findContours(seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    contours, _ = cv2.findContours(seg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     color = cv2.cvtColor(src_gray, cv2.COLOR_GRAY2BGR)
     cv2.drawContours(color, contours, -1, (255, 0, 0), 2)
 
     return QPixmap.fromImage(qimage_from_bgr(color))
 
 
-def apply_segmentation(src_gray: np.ndarray, rois_for_slice, threshold_brightness: float,
+def apply_segmentation(src_gray: np.ndarray, not_norm_gray: np.ndarray, rois_for_slice, threshold_brightness: float,
                        area_rel: float, spacing_xy, z_spacing, volume_by_slice: dict, slice_index: int):
     if src_gray is None:
-        return None, 0.0, 0.0
+        return None, 0.0, 0.0, None
@@ -50,18 +51,23 @@ def apply_segmentation(src_gray: np.ndarray, rois_for_slice, threshold_brightnes
     h, w = src_gray.shape
     mask = build_mask_from_rois(rois_for_slice, (h, w))
 
-    masked = src_gray.copy()
+    #masked = src_gray.copy()
+    masked = not_norm_gray.copy()
     masked[mask != 255] = 0
 
     thr_val = threshold_brightness * float(masked.max()) if masked.max() > 0 else 0.0
     seg = np.zeros_like(src_gray, dtype=np.uint8)
-    seg[(mask == 255) & (src_gray >= thr_val)] = 255
+    seg[(mask == 255) & (not_norm_gray >= thr_val)] = 255
 
     contours, _ = cv2.findContours(seg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     max_area = max((cv2.contourArea(c) for c in contours), default=0.0)
     area_thr = float(area_rel) * max_area
     filtered = [c for c in contours if cv2.contourArea(c) > area_thr]
 
+    # build a binary mask from the filtered contours only, for possible saving later
+    filtered_mask = np.zeros_like(src_gray, dtype=np.uint8)
+    cv2.drawContours(filtered_mask, filtered, -1, color=255, thickness=cv2.FILLED)
+
     spacing_x, spacing_y = spacing_xy
     voxel_mm3 = float(spacing_x) * float(spacing_y) * float(z_spacing)
     pixel_area_sum = sum(cv2.contourArea(c) for c in filtered)
@@ -74,25 +80,26 @@ def apply_segmentation(src_gray: np.ndarray, rois_for_slice, threshold_brightnes
     cv2.putText(color, f"Slice: {slice_index}  Volume (ml): {volume_ml:.3f}",
                 (10, 22), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 1)
 
-    return QPixmap.fromImage(qimage_from_bgr(color)), volume_ml, total_ml
+    return QPixmap.fromImage(qimage_from_bgr(color)), volume_ml, total_ml, filtered_mask
 
 
 # NEW: volume-only computation (no drawing, no dict modification)
-def compute_volume_ml(src_gray: np.ndarray, rois_for_slice, threshold_brightness: float,
+def compute_volume_ml(src_gray: np.ndarray, not_norm_gray: np.ndarray, rois_for_slice, threshold_brightness: float,
                       area_rel: float, spacing_xy, z_spacing: float) -> float:
     if src_gray is None:
         return 0.0
     h, w = src_gray.shape
     mask = build_mask_from_rois(rois_for_slice, (h, w))
 
-    masked = src_gray.copy()
+    #masked = src_gray.copy()
+    masked = not_norm_gray.copy()
     masked[mask != 255] = 0
 
     thr_val = threshold_brightness * float(masked.max()) if masked.max() > 0 else 0.0
     seg = np.zeros_like(src_gray, dtype=np.uint8)
-    seg[(mask == 255) & (src_gray >= thr_val)] = 255
+    seg[(mask == 255) & (not_norm_gray >= thr_val)] = 255
 
-    contours, _ = cv2.findContours(seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    contours, _ = cv2.findContours(seg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     max_area = max((cv2.contourArea(c) for c in contours), default=0.0)
     area_thr = float(area_rel) * max_area
     filtered = [c for c in contours if cv2.contourArea(c) > area_thr]
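
Note: the remainder of compute_volume_ml is not shown in this hunk; judging by apply_segmentation above, the per-slice volume is presumably the pixel area of the filtered contours times the voxel size in mm^3, divided by 1000 to get millilitres. A standalone sketch of that conversion with illustrative names:

import cv2
import numpy as np

def contour_volume_ml(contours, spacing_xy, z_spacing_mm: float) -> float:
    """Volume of the given contours in ml: pixel area * voxel size (mm^3) / 1000."""
    spacing_x, spacing_y = spacing_xy                    # in-plane pixel spacing, mm
    voxel_mm3 = float(spacing_x) * float(spacing_y) * float(z_spacing_mm)
    pixel_area = sum(cv2.contourArea(c) for c in contours)
    return pixel_area * voxel_mm3 / 1000.0               # mm^3 -> ml

# Example: a 10x10 px contour at 0.5x0.5 mm pixels and 3 mm slice spacing
square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(contour_volume_ml([square], (0.5, 0.5), 3.0))      # 100 * 0.75 / 1000 = 0.075 ml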