From 05fa9e411c63525448124dc690c9964e006c1faf Mon Sep 17 00:00:00 2001
From: yumoqing
Date: Tue, 12 Nov 2024 16:39:19 +0800
Subject: [PATCH] bugfix

---
 README.md        |  18 +++++++-
 app/qwenvl.py    | 105 +++++++++++++++++++++++++++++++++++++++++++++++
 app/test.py      |  29 +++++++++++++
 conf/config.json |   1 +
 requirements.txt |   2 +
 5 files changed, 153 insertions(+), 2 deletions(-)
 create mode 100644 app/qwenvl.py
 create mode 100644 app/test.py

diff --git a/README.md b/README.md
index c654954..c4f11bc 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
 # Qwen2-VL deployment instances
 
-# dependents
+## dependencies
 git+https://git.kaiyuancloud.cn/yumoqing/apppublic
 git+https://git.kaiyuancloud.cn/yumoqing/ahserver
 
-# preinstallation
+## preinstallation
 first, create a new python virtual env
 ```
 python3 -m venv ~/vl
@@ -39,3 +39,17 @@ sudo isntall.sh
 
 ## Change model or http port
 there is a config.json file under qwenvl folder, change the "modelname" and "port" value to suite your requirements
+## Models to use
+
+* Qwen/Qwen2-VL-7B-Instruct-AWQ
+* Qwen/Qwen2-VL-7B-Instruct
+* Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4
+* Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8
+* Qwen/Qwen2-VL-72B-Instruct
+* Qwen/Qwen2-VL-72B-Instruct-AWQ
+* Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4
+* Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int8
+* Qwen/Qwen2-VL-2B-Instruct
+* Qwen/Qwen2-VL-2B-Instruct-AWQ
+* Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4
+* Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8
diff --git a/app/qwenvl.py b/app/qwenvl.py
new file mode 100644
index 0000000..77114cb
--- /dev/null
+++ b/app/qwenvl.py
@@ -0,0 +1,105 @@
+import torch
+from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+from qwen_vl_utils import process_vision_info
+from appPublic.worker import awaitify
+from appPublic.jsonConfig import getConfig
+from ahserver.serverenv import ServerEnv
+from ahserver.webapp import webapp
+
+class Qwen2VLClass:
+    def __init__(self, modelname):
+        # default: Load the model on the available device(s)
+        self.model = Qwen2VLForConditionalGeneration.from_pretrained(
+            modelname,
+            torch_dtype=torch.bfloat16,
+            # attn_implementation="flash_attention_2",
+            device_map="auto"
+        )
+        self.min_pixels = 256 * 28 * 28
+        self.max_pixels = 1280 * 28 * 28
+        # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
+        # model = Qwen2VLForConditionalGeneration.from_pretrained(
+        #     "Qwen/Qwen2-VL-7B-Instruct",
+        #     torch_dtype=torch.bfloat16,
+        #     attn_implementation="flash_attention_2",
+        #     device_map="auto",
+        # )
+
+        # default processor
+        self.processor = AutoProcessor.from_pretrained(modelname,
+            min_pixels=self.min_pixels,
+            max_pixels=self.max_pixels
+        )
+
+        # The default range for the number of visual tokens per image in the model is 4-16384.
+        # You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
+        # min_pixels = 256*28*28
+        # max_pixels = 1280*28*28
+        # processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
+
+    def inference(self, prompt, image=None, videofile=None):
+        content = [
+            {
+                "type":"text",
+                "text":prompt
+            }
+        ]
+        if image:
+            if not image.startswith('file:///') \
+                    and not image.startswith('http://') \
+                    and not image.startswith('https://'):
+                image = f'data:image;base64,{image}'
+            content.append({
+                "type":"image",
+                "image":image
+            })
+        if videofile:
+            if not videofile.startswith('file:///'):
+                return 'only local video files are supported'
+
+            content.append({
+                "type":"video",
+                "video":videofile
+            })
+
+        messages = [
+            {
+                "role": "user",
+                "content": content
+            }
+        ]
+
+        # Preparation for inference
+        text = self.processor.apply_chat_template(
+            messages, tokenize=False, add_generation_prompt=True
+        )
+        image_inputs, video_inputs = process_vision_info(messages)
+        inputs = self.processor(
+            text=[text],
+            images=image_inputs,
+            videos=video_inputs,
+            padding=True,
+            return_tensors="pt",
+        )
+        inputs = inputs.to("cuda")
+
+        # Inference: Generation of the output
+        generated_ids = self.model.generate(**inputs, max_new_tokens=128)
+        generated_ids_trimmed = [
+            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+        ]
+        output_text = self.processor.batch_decode(
+            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+        )
+        return output_text
+
+def main():
+    config = getConfig()
+    modelname = config.modelname
+    m = Qwen2VLClass(modelname)
+    g = ServerEnv()
+    g.inference = awaitify(m.inference)
+
+
+if __name__ == '__main__':
+    webapp(main)
diff --git a/app/test.py b/app/test.py
new file mode 100644
index 0000000..8d32925
--- /dev/null
+++ b/app/test.py
@@ -0,0 +1,29 @@
+import requests
+import base64
+
+def file2b64(file_path):
+    # read the file content
+    with open(file_path, 'rb') as file:
+        file_content = file.read()
+
+    # encode the file content as Base64
+    base64_encoded_data = base64.b64encode(file_content)
+
+    # decode the Base64 bytes to a UTF-8 string
+    base64_encoded_str = base64_encoded_data.decode('utf-8')
+
+    return base64_encoded_str
+
+while True:
+    print('prompt:')
+    p = input()
+    print('input image path:')
+    i = input()
+    if p == '' or i == '':
+        continue
+    ret = requests.get('http://pd4e.com:10090/api',
+        params={
+            'prompt':p,
+            'image':file2b64(i)
+        })
+    print(ret.text)
diff --git a/conf/config.json b/conf/config.json
index 684fd84..bb220ef 100755
--- a/conf/config.json
+++ b/conf/config.json
@@ -1,6 +1,7 @@
 {
     "password_key":"!@#$%^&*(*&^%$QWERTYUIqwertyui234567",
     "modelname":"Qwen/Qwen2-VL-7B-Instruct",
+    "modelname":"Qwen/Qwen2-VL-2B-Instruct",
     "logger":{
         "name":"qwenvl",
         "levelname":"info",
diff --git a/requirements.txt b/requirements.txt
index b139459..8644c6a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,4 @@
 git+https://git.kaiyuancloud.cn/yumoqing/apppublic
 git+https://git.kaiyuancloud.cn/yumoqing/ahserver
+optimum
+auto_gptq
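
Note on usage: app/test.py in this patch only exercises the image path of `Qwen2VLClass.inference`. Below is a minimal client sketch for the video path, assuming the ahserver `/api` route forwards a `videofile` query parameter to `inference` the same way `prompt` and `image` are forwarded in app/test.py; the host, port, and video path are placeholders, and the video must be a `file:///` URL readable by the server process.

```python
import requests

# Hypothetical video request: the host/port and the 'videofile' parameter name
# follow the pattern used for images in app/test.py; adjust them to match
# your deployment and the "port" value in conf/config.json.
ret = requests.get('http://localhost:10090/api',
    params={
        'prompt': 'Describe what happens in this video',
        # qwenvl.py only accepts local videos given as file:/// URLs
        'videofile': 'file:///path/to/sample.mp4'
    })
print(ret.text)
```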