Search code examples
python, pyqt5, qgraphicsview, qgraphicsitem, pyside2

How to get the coordinate of the loaded image and not the one from the display


I'm working on an image viewer with a button that lets me get the coordinate of a loaded image when I click on it. However, when I zoom into the image and click on it, I get the coordinate within the display widget where that piece of the image happens to sit, independently of how the image is positioned inside the view. What I would like to get instead is the coordinate in the loaded image itself at the clicked location. This means that if the visible portion of the image is zoomed or scrolled (up/down or left/right), the reported coordinate should adapt accordingly. It might be an easy question with an easy answer, but I have been stuck on this problem for more than a week despite trying to understand the documentation on the Qt website, without success.

For example I tried to replace the line

self.photo_clicked.emit(QtCore.QPoint(event.pos()))

by

self.photo_clicked.emit(QtCore.QPoint(QGraphicsView.mapFromScene(event.pos())))

but it gives me that error:

TypeError: descriptor 'mapFromScene' requires a 'PySide2.QtWidgets.QGraphicsView' object but received a 'PySide2.QtCore.QPoint'

Here is the script (part of it is inspired from this example):

from PySide2.QtWidgets import (QWidget, QApplication, QSlider,
        QGraphicsView, QGraphicsScene, QVBoxLayout)
from PySide2.QtGui import QPainter, QColor
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtOpenGL import *
from PySide2.QtCore import *
from PySide2.QtGui import *
import sys

# Path of the image displayed by the viewer; loaded once into the pixmap item.
image_path_str='image.jpg'

class View(QGraphicsView):
    """Graphics view showing a single pixmap and reporting click positions.

    The :attr:`photo_clicked` signal carries the click position expressed in
    the *image's own* pixel coordinates, so the emitted point is independent
    of the current zoom factor and scroll offsets of the view.
    """

    # Click position in image (item) pixel coordinates.
    photo_clicked = QtCore.Signal(QtCore.QPoint)

    def __init__(self, parent):
        super(View, self).__init__()
        # NOTE: use a private name; ``self.scene`` would shadow the
        # inherited QGraphicsView.scene() accessor.
        self._scene = QtWidgets.QGraphicsScene(self)
        self.photo = QtWidgets.QGraphicsPixmapItem()
        self._scene.addItem(self.photo)
        pixmap = QtGui.QPixmap(image_path_str)
        self.photo.setPixmap(pixmap)
        self.setScene(self._scene)
        self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)

    def Hand_drag(self):
        """Enable hand-drag (panning) mode."""
        self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)

    def pixel_pointer(self):
        """Disable dragging so clicks can be used to pick pixels."""
        self.setDragMode(QtWidgets.QGraphicsView.NoDrag)

    def mousePressEvent(self, event):
        """Emit the clicked position in image pixel coordinates.

        The raw ``event.pos()`` is in *viewport* coordinates; it must be
        mapped view -> scene -> item so the result stays correct under any
        zoom or scroll state of the view.
        """
        if self.photo.isUnderMouse():
            scene_pos = self.mapToScene(event.pos())
            item_pos = self.photo.mapFromScene(scene_pos)
            self.photo_clicked.emit(item_pos.toPoint())
        super(View, self).mousePressEvent(event)

class Window(QWidget):
    """Main widget: an image view plus pixel-picking controls and a zoom slider."""

    def __init__(self):
        super(Window, self).__init__()
        self.view = View(self)

        # Mutually exclusive mode checkboxes: hand-drag vs. two pixel pickers.
        self.btn_hand_drag = QtWidgets.QCheckBox("Hand drag", self)
        self.btn_hand_drag.clicked.connect(self.view.Hand_drag)
        self.btn_hand_drag.clicked.connect(self.btn_hand_drag_uncheck_others)

        self.btn_pix_info1 = QtWidgets.QCheckBox("Point 1", self)
        self.btn_pix_info1.clicked.connect(self.view.pixel_pointer)
        self.btn_pix_info1.clicked.connect(self.btn_pix_info1_drag_uncheck_other)
        self.editPixInfo1 = QtWidgets.QLineEdit(self)
        self.editPixInfo1.setReadOnly(True)

        self.btn_pix_info2 = QtWidgets.QCheckBox("Point 2", self)
        self.btn_pix_info2.clicked.connect(self.view.pixel_pointer)
        self.btn_pix_info2.clicked.connect(self.btn_pix_info2_drag_uncheck_other)
        self.editPixInfo2 = QtWidgets.QLineEdit(self)
        self.editPixInfo2.setReadOnly(True)

        self.view.photo_clicked.connect(self.photo_clicked)

        # Zoom slider: 100 == 1:1 scale.
        zoom_slider = QSlider(Qt.Horizontal, self)
        zoom_slider.setRange(1, 500)
        zoom_slider.setValue(100)
        zoom_slider.valueChanged[int].connect(self.zoom)

        layout = QVBoxLayout()
        for widget in (
            self.btn_hand_drag,
            self.btn_pix_info1,
            self.editPixInfo1,
            self.btn_pix_info2,
            self.editPixInfo2,
            self.view,
            zoom_slider,
        ):
            layout.addWidget(widget)

        self.setLayout(layout)
        self.setWindowTitle("Image viewer")
        self.setGeometry(200, 200, 1000, 800)

    def zoom(self, value):
        """Rescale the view; slider value 100 corresponds to factor 1.0."""
        factor = value / 100
        self.view.resetTransform()
        self.view.scale(factor, factor)

    def btn_hand_drag_uncheck_others(self):
        """Checking 'Hand drag' clears both pixel-picker boxes."""
        for box in (self.btn_pix_info1, self.btn_pix_info2):
            box.setChecked(False)

    def btn_pix_info1_drag_uncheck_other(self):
        """Checking 'Point 1' clears the other two mode boxes."""
        for box in (self.btn_hand_drag, self.btn_pix_info2):
            box.setChecked(False)

    def btn_pix_info2_drag_uncheck_other(self):
        """Checking 'Point 2' clears the other two mode boxes."""
        for box in (self.btn_hand_drag, self.btn_pix_info1):
            box.setChecked(False)

    def photo_clicked(self, pos):
        """Show the clicked coordinate in whichever picker box is active."""
        text = '%d, %d' % (pos.x(), pos.y())
        if self.btn_pix_info1.isChecked():
            self.editPixInfo1.setText(text)
        if self.btn_pix_info2.isChecked():
            self.editPixInfo2.setText(text)

# Reuse an already-running QApplication (e.g. inside an IDE or IPython
# session) instead of creating a second one, which Qt forbids.
app = QApplication.instance()
if app is None:
    app = QApplication([])
w = Window()
w.show()
w.raise_()
# Propagate Qt's event-loop exit code to the shell instead of discarding it.
sys.exit(app.exec_())

Solution

  • When you zoom in and press a pixel, that screen pixel can correspond to several image pixels (or vice versa) — what is shown is a downsampled or upsampled version of the image, as the case may be. So whatever you do, you will get a pixel that is not exact, but very close. Getting to the point: since in @ekhumuro's implementation the items themselves are never scaled, the right thing is to use the mouse position in the coordinate system of the item rather than of the scene, because the item can be moved. The general solution, assuming the item itself is not transformed, is:

    def mousePressEvent(self, event):
        """Emit the click position in the pixmap item's own pixel coordinates."""
        if self._photo.isUnderMouse():
            # Map view -> scene, then scene -> item.  mapFromScene is correct
            # even if the item is moved or transformed; by contrast,
            # mapToItem(self._photo, ...) called on the item itself is an
            # identity mapping and would emit scene coordinates instead.
            p = self._photo.mapFromScene(self.mapToScene(event.pos()))
            self.photoClicked.emit(p.toPoint())
        super(PhotoViewer, self).mousePressEvent(event)