main
yumoqing 2022-06-29 19:38:51 +08:00
parent 479a6e7e9a
commit 88f2a18c8d
11 changed files with 33966 additions and 0 deletions

kivycv/audio.py 100644 (+138)

@@ -0,0 +1,138 @@
import os
import pickle
from functools import partial
import wave
import pyaudio
import time
import tempfile
from appPublic.app_logger import AppLogger, create_logger
class Audio(AppLogger, pyaudio.PyAudio):
def __init__(self, save_file=None):
AppLogger.__init__(self)
pyaudio.PyAudio.__init__(self)
self.temp_filename = None
self.chunk = 1024
self.devices = [ self.get_device_info_by_index(i) \
for i in range(self.get_device_count()) ]
def get_input_device(self):
# get_device_info_by_index() returns plain dicts, so use key lookup
return [ d for d in self.devices if d['maxInputChannels'] > 0 ]
def get_output_device(self):
return [ d for d in self.devices if d['maxOutputChannels'] > 0 ]
def tmpfile(self):
x = tempfile.mkstemp(suffix='.wav')
os.close(x[0])
self.temp_filename = x[1]
def record_cb(self, in_data, frame_count, time_info, status):
bdata = pickle.dumps(in_data)
self.info('frame_count=%s, time_info=%s, status=%s, bytes count=%s', \
frame_count, time_info, status, len(bdata))
self.rec_frames += frame_count
self.current_ts = time.time()
self.wavfile.writeframesraw(in_data)
if self.running:
return (None, pyaudio.paContinue)
return (None, pyaudio.paComplete)
def replay_cb(self, in_data, frame_count, time_info, status):
data = self.wavfile.readframes(frame_count)
bdata = pickle.dumps(data)
self.info('frame_count=%s, data length in bytes=%s', \
frame_count, len(bdata))
if not data:
return (None, pyaudio.paComplete)
return (data, pyaudio.paContinue)
def get_output_index(self):
dev_cnt = self.get_device_count()
for i in range(dev_cnt):
x = self.get_device_info_by_index(i)
print(x)
return dev_cnt - 1
def record(self, save_file=None, stop_cond_func=None):
filename = save_file
if filename is None:
self.tmpfile()
filename = self.temp_filename
self.wavfile = wave.open(filename, 'wb')
self.wavfile.setnchannels(2)
self.wavfile.setsampwidth(2)
self.wavfile.setframerate(44100.00)
self.stream = self.open(format=pyaudio.paInt16,
channels=2,
rate=44100,
input=True,
frames_per_buffer=self.chunk,
stream_callback=self.record_cb)
self.stream.start_stream()
self.running = True
self.rec_frames = 0
self.start_ts = self.current_ts = time.time()
while self.stream.is_active():
if stop_cond_func and stop_cond_func():
self.running = False
time.sleep(0.05)
self.stream.stop_stream()
self.stream.close()
self.wavfile.close()
if save_file is None:
self.replay()
def replay(self, play_file=None):
idx = self.get_output_index()
x = self.get_device_info_by_index(idx)
y = self.get_default_input_device_info()
self.info('default_input=%s, default_output=%s', y, x)
if play_file is None:
play_file = self.temp_filename
self.wavfile = wave.open(play_file, 'rb')
format = self.get_format_from_width(self.wavfile.getsampwidth())
framerate=self.wavfile.getframerate()
self.info('format=%s, framerate=%s', format, framerate)
self.stream = self.open(format=format,
channels=self.wavfile.getnchannels(),
rate=framerate,
output=True,
output_device_index=idx,
frames_per_buffer=self.chunk,
stream_callback=self.replay_cb)
self.stream.start_stream()
while self.stream.is_active():
time.sleep(0.05)
self.stream.stop_stream()
self.stream.close()
self.wavfile.close()
def __del__(self):
if self.temp_filename:
os.remove(self.temp_filename)
if __name__ == '__main__':
import sys
def stop_func(audio):
# return True once 10 seconds have elapsed; record() polls this and stops the stream
return audio.current_ts - audio.start_ts >= 10
create_logger('audio', levelname='debug')
a = Audio()
if 'replay' in sys.argv[0]:
if len(sys.argv) < 2:
print(f'usage:\n{sys.argv[0]} WAVFILE')
sys.exit(1)
a.replay(sys.argv[1])
elif 'record' in sys.argv[0]:
sf = None
if len(sys.argv) >= 2:
sf = sys.argv[1]
f = partial(stop_func, a)
a.record(sf, stop_cond_func=f)
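A minimal usage sketch for the Audio class above, assuming the module is importable as kivycv.audio (per the file header) and that appPublic's create_logger is available as used in the file itself; it records roughly five seconds into a WAV file:

from kivycv.audio import Audio
from appPublic.app_logger import create_logger

create_logger('audio-demo', levelname='info')
a = Audio()
# stop_cond_func is polled inside record(); returning True ends the recording
a.record('demo.wav', stop_cond_func=lambda: a.current_ts - a.start_ts >= 5)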


@@ -0,0 +1,44 @@
import tempfile
import pyaudio
import wave
from kivy.event import EventDispatcher
from kivy.properties import NumericProperty, StringProperty, ObjectProperty
from kivyblocks.baseWidget import HBox
from .micphone import Micphone
class AudioRecorder(EventDispatcher):
fs = NumericProperty(None)
filename = StringProperty(None)
voice_src = ObjectProperty(None)
def __init__(self, **kw):
super(AudioRecorder, self).__init__(**kw)
self.saving = False
if not self.filename:
self.mk_temp_file()
def mk_temp_file(self):
self.filename = tempfile.mktemp(suffix='.wav')
def on_filename(self, *args):
self.wf = wave.open(self.filename, 'wb')
def on_voice_src(self, *args):
audio_profile = self.voice_src.audio_profile()
self.wf.setnchannels(audio_profile['channels'])
self.wf.setsampwidth(audio_profile['sample_size'])
self.wf.setframerate(audio_profile['sample_rate'])
self.voice_src.bind(on_fps=self.write)
def write(self, o, d):
# d is the list of raw byte chunks delivered by the mic's on_fps event
if self.saving:
self.wf.writeframes(b''.join(d))
def start(self):
if not self.voice_src.recording:
self.voice_src.start()
self.saving = True
def stop(self):
self.saving = False
self.wf.close()
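A hedged sketch of driving this recorder; the import paths are assumptions (this file's module name is not shown in the diff), it relies on the Micphone class added in kivycv/micphone.py below, and it needs a running Kivy app so the Clock-driven on_fps events actually fire:

from kivy.clock import Clock
from kivycv.micphone import Micphone
# from kivycv.audio_recorder import AudioRecorder   # hypothetical module name

mic = Micphone()
rec = AudioRecorder()      # no filename given, so a temporary .wav is created
rec.voice_src = mic        # triggers on_voice_src and writes the wave header params
rec.start()
Clock.schedule_once(lambda dt: rec.stop(), 10)   # stop after ~10 seconds
print('recording to', rec.filename)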


@@ -0,0 +1,67 @@
import kivy
import numpy as np
from .micphone import Micphone
from kivy.uix.camera import Camera
from kivy.properties import NumericProperty
from kivy.event import EventDispatcher
from .android_rotation import *
if kivy.platform in [ 'win', 'linux', 'macosx' ]:
from PIL import ImageGrab
class ScreenWithMic(Micphone, EventDispatcher):
def __init__(self, **kw):
super(ScreenWithMic, self).__init__(**kw)
def get_image_data(self):
image = ImageGrab.grab()
# PIL removed Image.tostring(); tobytes() returns the raw pixel buffer
imgdata = image.tobytes()
return imgdata
def get_fps_data(self, *args):
ad = super(ScreenWithMic, self).get_fps_data()
vd = self.get_image_data()
d = {
'v':vd,
'a':ad
}
return d
VS={
ROTATION_0:270,
ROTATION_90:0,
ROTATION_180:90,
ROTATION_270:180,
}
class CameraWithMic(Micphone, Camera):
angle = NumericProperty(0)
def __init__(self, **kw):
super(CameraWithMic, self).__init__(**kw)
self.isAndroid = kivy.platform == 'android'
self.set_angle(-90)
def set_angle(self, angle):
self.angle = angle
def image_rotation(self):
if not self.isAndroid:
return
x = get_rotation()
self.angle = VS[x]
def get_image_data(self):
image = np.frombuffer(self.texture.pixels, dtype='uint8')
image = image.reshape(self.texture.height, self.texture.width, -1)
imgdata = image.tobytes()
return imgdata
def get_fps_data(self, *args):
# self.image_rotation()
ad = super(CameraWithMic, self).get_fps_data()
vd = self.get_image_data()
d = {
'v':vd,
'a':ad
}
return d
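A hedged sketch of consuming the combined audio/video payload; it assumes this module is importable and that a Kivy app is already running so the camera texture and the Micphone's Clock events are live:

# from kivycv.camera_screen import CameraWithMic   # hypothetical module path

def on_av_frame(widget, d):
    # d['a'] is a list of raw audio chunks, d['v'] is the raw RGBA frame buffer
    print('audio chunks:', len(d['a']), 'video bytes:', len(d['v']))

cam = CameraWithMic(play=True, resolution=(640, 480))
cam.bind(on_fps=on_av_frame)
cam.start()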


@@ -0,0 +1,104 @@
from traceback import print_exc
from kivy.app import App
from kivy.logger import Logger
from kivy.uix.camera import Camera
from kivy.properties import BooleanProperty, NumericProperty
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.graphics import PushMatrix, Rotate, PopMatrix
from kivy.graphics.texture import Texture
import kivy
import numpy as np
import cv2
from kivy.lang import Builder
from .image_processing.image_processing import face_detection
from .xcamera.xcamera import XCamera
class CustomCamera(XCamera):
detectFaces = BooleanProperty(False)
angle_map = {
0:90,
1:0,
2:270,
3:180
}
def __init__(self, **kwargs):
super(CustomCamera, self).__init__(**kwargs)
self.isAndroid = kivy.platform == "android"
self.app = App.get_running_app()
def on_tex(self, camera):
texture = camera.texture
image = np.frombuffer(texture.pixels, dtype='uint8')
image = image.reshape(texture.height, texture.width, -1)
size1 = image.shape
x = 3
if self.isAndroid:
x = self.app.get_rotation()
y = self.angle_map[x]
# np.rot90 wants an integer number of quarter turns
x = y // 90
if x > 0:
image = np.rot90(image, x)
if self.detectFaces:
try:
image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
_image, faceRect = face_detection(image, (0, 255, 0, 255))
image = cv2.cvtColor(_image, cv2.COLOR_BGR2RGBA)
except Exception as e:
print('custom_camera.py:Exception:',e)
print_exc()
h,w,_ = image.shape
numpy_data = image.tobytes()
self.texture = Texture.create(size=(w,h), \
colorfmt='rgba')
self.texture.blit_buffer(numpy_data,
size=(w,h),
bufferfmt="ubyte", colorfmt='rgba')
self.texture_size = list(self.texture.size)
self.canvas.ask_update()
return
def change_index(self, *args):
new_index = 1 if self.index == 0 else 0
self._camera._set_index(new_index)
self.index = new_index
self.angle = -90 if self.index == 0 else 90
def get_cameras_count(self):
cameras = 1
if self.isAndroid:
cameras = self._camera.get_camera_count()
return cameras
def dismiss(self, *args, **kw):
self.play = False
cv2.destroyAllWindows()
class QrReader(CustomCamera):
def __init__(self, **kw):
super(QrReader, self).__init__(**kw)
self.qr_reader = cv2.QRCodeDetector()
self.register_event_type('on_data')
self.qr_result = None
Logger.info('QrReader: initialized')
def getValue(self):
return {
"qr_result":self.qr_result
}
def on_data(self, d):
print('data=',d)
def on_tex(self, camera):
super(QrReader, self).on_tex(camera)
image = np.frombuffer(self.texture.pixels, dtype='uint8')
image = image.reshape(self.texture.height, self.texture.width, -1)
image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
self.qr_result, bbox,_ = self.qr_reader.detectAndDecode(image)
if self.qr_result:
self.dispatch('on_data',self.qr_result)
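A short sketch of wiring QrReader into an app; the on_data event carries the decoded QR payload, and the package prefix in the import is an assumption:

from kivy.app import App
from kivycv.custom_camera import QrReader   # package prefix assumed

class QrDemoApp(App):
    def build(self):
        qr = QrReader(play=True)
        qr.bind(on_data=lambda w, d: print('decoded:', d))
        return qr

QrDemoApp().run()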

File diff suppressed because it is too large.


@@ -0,0 +1,57 @@
import os
import cv2
# import imutils
import numpy as np
def simple_return(image):
return image
def crop_image(image):
return image[0:350, 0:350]
curdir = os.path.dirname(__file__)
# a leading '/' makes os.path.join throw away curdir, so join the parts separately
pattern_file = os.path.join(curdir, 'cascades', 'haarcascade_frontalface_default.xml')
detector = cv2.CascadeClassifier(pattern_file)
def face_detection(image, rect_color, rotation=-90):
if rotation == 90:
image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
if rotation == -90:
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
orig_image = image.copy()
height, width = orig_image.shape[:2]
new_width = 300
r = new_width / float(width)
dim = (new_width, int(height * r))
ratio = (width / dim[0], height / dim[1])
image = cv2.resize(image, dim)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if detector.empty():
print('image_processing.py: cascade failed to load from', pattern_file)
return orig_image, None
faceRects = detector.detectMultiScale(image,
scaleFactor=1.2,
minNeighbors=5,
minSize=(20, 20),
flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faceRects:
x = int(x * ratio[0])
y = int(y * ratio[1])
w = x + int(w * ratio[0])
h = y + int(h * ratio[1])
cv2.rectangle(orig_image, (x, y), (w, h), rect_color, 2)
if rotation == 90:
orig_image = cv2.rotate(orig_image, cv2.ROTATE_90_CLOCKWISE)
if rotation == -90:
orig_image = cv2.rotate(orig_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
return orig_image, faceRects
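An offline usage sketch for face_detection(); the image path is a placeholder, the import path assumes the kivycv package prefix, and rotation=0 is passed so the frame is not rotated before detection:

import cv2
from kivycv.image_processing.image_processing import face_detection   # prefix assumed

img = cv2.imread('test.jpg')          # hypothetical BGR test image
marked, rects = face_detection(img, (0, 255, 0), rotation=0)
print('faces found:', len(rects) if rects is not None else 0)
cv2.imwrite('test_marked.jpg', marked)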


@@ -0,0 +1,88 @@
import os
from kivy.app import App
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.factory import Factory
import cv2
facefilepath=os.path.dirname(cv2.__file__)
facepattern = '%s/%s' % (facefilepath, \
'data/haarcascade_frontalface_default.xml')
def set_res(cap, x,y):
cap.set(cv2.CAP_PROP_FRAME_WIDTH, int(x))
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(y))
return cap.get(cv2.CAP_PROP_FRAME_WIDTH),cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
class KivyCamera(Image):
def __init__(self, device=0, fps=25.0, face_detect=False, **kwargs):
print('KivyCamera inited')
self.update_task = None
self.capture = None
super(KivyCamera, self).__init__(**kwargs)
self.capture = cv2.VideoCapture(device)
self.face_detect = face_detect
self.device = device
self.faceCascade = None
if face_detect:
self.faceCascade = cv2.CascadeClassifier(facepattern)
# CascadeClassifier objects are always truthy; empty() reports a failed load
if self.faceCascade.empty():
print('face cascade failed to load from', facepattern)
self.update_task = Clock.schedule_interval(self.update, 1.0 / fps)
def on_size(self,o,size):
if self.capture:
self.capture.release()
self.capture = cv2.VideoCapture(self.device)
size = set_res(self.capture,self.width,self.height)
print(size)
def add_face_detect(self,frame):
frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.faceCascade.detectMultiScale(frameGray,
scaleFactor = 1.2, minNeighbors = 5)
print('add_face_detect(): faces=',faces)
# THIS LINE RAISE ERROR
# faces = self.faceCascade.detectMultiScale(frameGray, 1.1, 4)
for (x, y, w, h) in faces: # added
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
return frame
def update(self, dt):
ret, frame = self.capture.read()
if ret:
if self.width / self.height > frame.shape[1] / frame.shape[0]:
fx = fy = self.height / frame.shape[0]
else:
fx = fy = self.width / frame.shape[1]
frame = cv2.resize(frame, None,
fx=fx, fy=fy,
interpolation=cv2.INTER_LINEAR)
if self.faceCascade:
try:
frame = self.add_face_detect(frame)
except Exception as e:
print('Error, e=',e)
pass
# convert it to texture
buf1 = cv2.flip(frame, 0)
buf = buf1.tobytes()
image_texture = Texture.create(
size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# display image from the texture
self.texture = image_texture
else:
self.update_task.cancel()
print('failed to read from capture')
def __del__(self):
if self.update_task:
self.update_task.cancel()
self.update_task = None
if self.capture:
self.capture.release()
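A hedged sketch of showing the KivyCamera widget; the import path is an assumption (this file's name is not shown in the diff), and it assumes OpenCV can open device 0 and that the cascade file ships with the installed cv2 package:

from kivy.app import App
# from kivycv.kivycamera import KivyCamera   # hypothetical module path

class CamApp(App):
    def build(self):
        return KivyCamera(device=0, fps=30.0, face_detect=True)

CamApp().run()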

kivycv/micphone.py 100644 (+84)

@@ -0,0 +1,84 @@
from threading import Thread, Lock
import pyaudio
from kivy.event import EventDispatcher
from kivy.properties import NumericProperty, ObjectProperty
from kivy.clock import Clock
import wave
CHUNK = 1024
CHANNELS = 2
FORMAT = pyaudio.paInt16
samplerate = 48000
class Micphone(EventDispatcher):
channels = NumericProperty(2)
samplerate = NumericProperty(48000)
fps = NumericProperty(1/60)
def __init__(self, **kw):
super(Micphone, self).__init__(**kw)
self.chunk = CHUNK
self.format = pyaudio.paInt16
self.chunk_buffer = []
self.lock = Lock()
self.recording = False
self._audio = pyaudio.PyAudio()
self._mic = self._audio.open(format=self.format,
channels=self.channels,
rate=self.samplerate,
input=True,
frames_per_buffer=self.chunk)
self.sample_size = self._audio.get_sample_size(self.format)
self.register_event_type('on_fps')
self.register_event_type('on_start')
self.register_event_type('on_end')
self.fps_task = None
def on_fps(self, d):
print('on_fps fired')
def on_start(self):
print('on_start fired')
def on_end(self):
print('on_end fired')
def audio_profile(self):
return {
'channels':self.channels,
'sample_size':self.sample_size,
'sample_rate':self.samplerate
}
def get_frames(self, *args):
d = self.get_fps_data()
self.dispatch('on_fps', d)
def get_fps_data(self, *args):
self.lock.acquire()
d = self.chunk_buffer[:]
self.chunk_buffer = []
self.lock.release()
return d
def start(self, *args):
self.recording = True
# run the blocking read loop on a background thread
Thread(target=self._record, daemon=True).start()
self.dispatch('on_start')
self.fps_task = Clock.schedule_interval(self.get_frames, self.fps)
def _record(self):
self.recording = True
while self.recording:
data = self._mic.read(self.chunk)
self.lock.acquire()
self.chunk_buffer.append(data)
self.lock.release()
def stop(self, *args):
self.recording = False
self.fps_task.cancel()
self.dispatch('on_end')
def close(self):
self._mic.stop_stream()
self._mic.close()
# PyAudio instances are shut down with terminate()
self._audio.terminate()
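A minimal sketch for exercising Micphone by itself; Clock only ticks inside a running Kivy app, so this assumes it is executed from within one (for example from an App's on_start), and the import path follows the file header above:

from kivy.clock import Clock
from kivycv.micphone import Micphone

mic = Micphone()
mic.bind(on_fps=lambda w, chunks: print('got', len(chunks), 'chunks'))
mic.start()
Clock.schedule_once(lambda dt: (mic.stop(), mic.close()), 5)   # stop after ~5 seconds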


@@ -0,0 +1,69 @@
import numpy as np
import cv2
from kivy.app import App
from kivy.properties import BooleanProperty
from kivy.core.window import Window
from kivy.graphics.texture import Texture
from kivy.uix.image import Image
from kivy.clock import Clock
class QRCodeReader(Image):
opened = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.register_event_type('on_data')
self.task = None
if self.opened:
self.open()
def open(self):
self.opened = True
self.cam = cv2.VideoCapture(0)
self.detector = cv2.QRCodeDetector()
self.task = Clock.schedule_interval(self.read,1.0/30.0)
Window.add_widget(self)
def on_data(self,d):
print('QRCodeReader().on_data(),data=',d)
self.dismiss()
def on_touch_down(self,touch):
if not self.collide_point(*touch.pos):
self.dismiss()
super().on_touch_down(touch)
def dismiss(self, *args, **kw):
if not self.opened:
return
self.opened = False
self.task.cancel()
self.task = None
self.cam.release()
cv2.destroyAllWindows()
Window.remove_widget(self)
def showImage(self,img):
image = np.rot90(np.swapaxes(img,0,1))
tex = Texture.create(size=(image.shape[1], image.shape[0]),
colorfmt='rgb')
tex.blit_buffer(image.tobytes(), colorfmt='rgb', bufferfmt='ubyte')
self.texture = tex
def read(self,p):
_, img = self.cam.read()
self.showImage(img)
data,bbox,_ = self.detector.detectAndDecode(img)
if data:
d = {
'data':data
}
self.dispatch('on_data',d)
if __name__ == '__main__':
class MyApp(App):
def build(self):
r = QRCodeReader()
return r
myapp = MyApp()
myapp.run()


@@ -0,0 +1 @@
__version__ = '0.1.0'