Screen sharing: initial commit
This commit is contained in:
parent
300b28bdfa
commit
1e1772e306
4 changed files with 38 additions and 7 deletions
|
@ -6,6 +6,7 @@ from toxav_enums import *
|
||||||
import cv2
|
import cv2
|
||||||
import itertools
|
import itertools
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
import screen_sharing
|
||||||
# TODO: play sound until outgoing call will be started or cancelled
|
# TODO: play sound until outgoing call will be started or cancelled
|
||||||
|
|
||||||
|
|
||||||
|
@ -203,6 +204,10 @@ class AV:
|
||||||
self._video_width = s.video['width']
|
self._video_width = s.video['width']
|
||||||
self._video_height = s.video['height']
|
self._video_height = s.video['height']
|
||||||
|
|
||||||
|
if s.video['device'] == -1:
|
||||||
|
self._video = screen_sharing.DesktopGrabber(s.video['x'], s.video['y'],
|
||||||
|
s.video['width'], s.video['height'])
|
||||||
|
else:
|
||||||
self._video = cv2.VideoCapture(s.video['device'])
|
self._video = cv2.VideoCapture(s.video['device'])
|
||||||
self._video.set(cv2.CAP_PROP_FPS, 25)
|
self._video.set(cv2.CAP_PROP_FPS, 25)
|
||||||
self._video.set(cv2.CAP_PROP_FRAME_WIDTH, self._video_width)
|
self._video.set(cv2.CAP_PROP_FRAME_WIDTH, self._video_width)
|
||||||
|
|
|
@ -832,8 +832,12 @@ class VideoSettings(CenteredWidget):
|
||||||
self.input.setGeometry(QtCore.QRect(25, 30, 350, 30))
|
self.input.setGeometry(QtCore.QRect(25, 30, 350, 30))
|
||||||
self.input.currentIndexChanged.connect(self.selectionChanged)
|
self.input.currentIndexChanged.connect(self.selectionChanged)
|
||||||
import cv2
|
import cv2
|
||||||
self.devices = []
|
self.devices = [-1]
|
||||||
self.frame_max_sizes = []
|
screen = QtWidgets.QApplication.primaryScreen()
|
||||||
|
size = screen.size()
|
||||||
|
self.frame_max_sizes = [(size.width(), size.height())]
|
||||||
|
desktop = QtWidgets.QApplication.translate("videoSettingsForm", "Desktop")
|
||||||
|
self.input.addItem(desktop)
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
v = cv2.VideoCapture(i)
|
v = cv2.VideoCapture(i)
|
||||||
if v.isOpened():
|
if v.isOpened():
|
||||||
|
|
22
toxygen/screen_sharing.py
Normal file
22
toxygen/screen_sharing.py
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
import numpy as np
|
||||||
|
from PyQt5 import QtWidgets
|
||||||
|
|
||||||
|
|
||||||
|
class DesktopGrabber:
    """Grabs a rectangular region of the primary screen as raw frames.

    Mimics the ``cv2.VideoCapture`` interface (``read()`` returning a
    ``(success, frame)`` pair) so the A/V code can use it as a drop-in
    replacement video source for desktop sharing.
    """

    def __init__(self, x, y, width, height):
        """Set up the capture region.

        :param x: left edge of the capture rectangle, in screen pixels
        :param y: top edge of the capture rectangle, in screen pixels
        :param width: requested capture width; rounded down to a multiple of 4
        :param height: requested capture height; rounded down to a multiple of 4
        """
        self._x = x
        self._y = y
        # Dimensions are truncated to multiples of 4 — presumably required by
        # the downstream video encoder's frame-size constraints (TODO confirm).
        self._width = width - width % 4
        self._height = height - height % 4
        self._screen = QtWidgets.QApplication.primaryScreen()

    def read(self):
        """Capture one frame of the configured screen region.

        :return: ``(True, frame)`` where ``frame`` is a writable
            ``(height, width, 4)`` uint8 array, matching the shape of a
            ``cv2.VideoCapture.read()`` result with an alpha channel.
        """
        pixmap = self._screen.grabWindow(0, self._x, self._y, self._width, self._height)
        image = pixmap.toImage()
        # NOTE(review): assumes a 32-bit pixel format with no scanline padding
        # and devicePixelRatio == 1 — TODO confirm on HiDPI displays, where
        # the grabbed pixmap may be larger than width x height.
        raw = image.bits().asstring(self._width * self._height * 4)
        # np.fromstring is deprecated/removed for binary input; use
        # np.frombuffer instead. frombuffer yields a read-only view, so copy()
        # keeps the original contract of returning a writable array.
        frame = np.frombuffer(raw, dtype=np.uint8).reshape((self._height, self._width, 4)).copy()
        return True, frame
|
|
@ -47,7 +47,7 @@ class Settings(dict, Singleton):
|
||||||
self.audio = {'input': p.get_default_input_device_info()['index'] if input_devices else -1,
|
self.audio = {'input': p.get_default_input_device_info()['index'] if input_devices else -1,
|
||||||
'output': p.get_default_output_device_info()['index'] if output_devices else -1,
|
'output': p.get_default_output_device_info()['index'] if output_devices else -1,
|
||||||
'enabled': input_devices and output_devices}
|
'enabled': input_devices and output_devices}
|
||||||
self.video = {'device': 0, 'width': 640, 'height': 480}
|
self.video = {'device': -1, 'width': 640, 'height': 480, 'x': 0, 'y': 0}
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_auto_profile():
|
def get_auto_profile():
|
||||||
|
|
Loading…
Reference in a new issue