Sometimes the GPU memory at hand is not enough to run a model (for example, eva-clip-8b); in that case, accelerate can be used to offload part of the parameters to the CPU.
https://github.com/baaivision/EVA/issues/147
https://blog.csdn.net/qq_42363032/article/details/139597486
My learning notes:
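A minimal sketch of the idea with transformers + accelerate (the Hub id, memory budgets, and offload folder below are my own assumptions, not taken from the links above):

import torch
from transformers import AutoModel

# device_map="auto" lets accelerate put as many layers on the GPU as fit,
# and offload the rest to CPU RAM (and, via offload_folder, to disk).
model = AutoModel.from_pretrained(
    "BAAI/EVA-CLIP-8B",                       # assumed Hub id for eva-clip-8b
    torch_dtype=torch.float16,
    trust_remote_code=True,
    device_map="auto",
    max_memory={0: "10GiB", "cpu": "48GiB"},  # assumed budgets; adjust to the machine
    offload_folder="offload",
)
model.eval()

Layers that do not fit under the GPU budget stay on the CPU and are moved in on demand, so inference is slower but no longer hits out-of-memory.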
https://stackoverflow.com/questions/65037368/remove-downloaded-tensorflow-and-pytorchhugging-face-models
pip install "huggingface_hub[cli]"
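With the CLI installed, the download cache can then be inspected and cleaned up interactively (from my own use of the tool; check --help for the exact options):

huggingface-cli scan-cache
huggingface-cli delete-cache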
https://answers.microsoft.com/en-us/windows/forum/all/optimize-vhd-not-found-in-windows-10-home/a727b760-0f82-4d0f-8480-d49eeaeb11a2
Just follow the steps described there.
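For reference, when Optimize-VHD is missing (the Home edition has no Hyper-V PowerShell module), the usual workaround is to compact the disk with diskpart; a sketch with a placeholder path, and the linked answer may differ in the details:

diskpart
select vdisk file="C:\path\to\your.vhdx"
attach vdisk readonly
compact vdisk
detach vdisk
exit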
class Solution {
public:
    int furthestBuilding(vector<int>& heights, int bricks, int ladders) {
        long long int sum = 0, topKSum = 0, i;
        // min-heap holding the largest `ladders` climbs seen so far
        priority_queue<int, vector<int>, greater<int>> pq;
        for (i = 1; i < heights.size(); i++) {
            int diff = heights[i] - heights[i-1];
            if (diff > 0) {
                sum += diff;
                topKSum += diff;
                pq.push(diff);
                if (pq.size() > ladders) {
                    topKSum -= pq.top();
                    pq.pop();
                }
                // every climb not covered by a ladder must be paid with bricks
                if (sum - topKSum > bricks)
                    break;
            }
        }
        return i - 1;
    }
};
1. Comparison
// for a vector<int> v, sorted in descending order:
bool cmp(const int& a, const int& b)
{
    return a > b;
}
sort(v.begin(), v.end(), cmp);   // pass the function itself, not cmp()
Note: cmp should return true exactly when a ought to come before b in the final sorted order.
Problem:
When connecting over ssh, this error appears: Bad permissions. Try removing permissions for user: LAPTOP-xxxxxx\\user2 on file C:/Users/user1/.ssh/config.
(user1 is the account I am actually using; user2 is another account that exists on the machine but is not in use.)
Solution:
Go to C:\Users\user1\.ssh
Right-click the .ssh folder > Disable inheritance > Convert inherited permissions into explicit permissions on this object
Then remove user2 and click Apply.
Done!
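The same change can also be made from a terminal with icacls instead of the GUI (a sketch; I only verified the GUI route, and the account name is the placeholder from the error message above):

icacls "C:\Users\user1\.ssh" /inheritance:d
icacls "C:\Users\user1\.ssh" /remove "LAPTOP-xxxxxx\user2"

/inheritance:d converts the inherited permissions into explicit ones, and /remove drops user2's entries.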
Ref:
https://stackoverflow.com/questions/49926386/openssh-windows-bad-owner-or-permissions
https://leesonhsu.blogspot.com/2021/03/ssh-key-windows-permission-denied.html
"name": Service 名稱
"central_broker" & "app_broker": port 與 IP 的設定
"host"
"management_port"
"connection_port"
"input": DataName_DataFormat (ex: png_image, acc_nparray)
"output": DataName_DataFormat (ex: fall_text, speech_text)
"topology": "source" & "destination" pair
"type": "input", "output", "server"
"queue":
if type == "input" or "output": DataName_DataFormat
else: ModelName_<input/output>_DataFormat
(ex: "HumanDetector_output_image", "FallDetectorLSTM_input_nparray")
(ex:
"source":
"type": "input",
"queue": "png_image"
"destination":
"type": "server",
"queue": "FallDetectorGCN_input_image")
Version without the central broker:
import pika, sys, os
import numpy as np
import cv2
import torch
from torch import nn
import torchvision

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# load the COCO dataset category names
# we will use the same list for this notebook
COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

class PersonDetector():
    def __init__(self) -> None:
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.56.1', port=5672))
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange="PersonDetector", exchange_type="topic", auto_delete=True,
                                      arguments={"output": ["PersonDetector_output_text"]})
        self.channel.queue_declare(queue='PersonDetector_input_image', exclusive=True)
        self.channel.queue_bind(queue="PersonDetector_input_image", exchange="PersonDetector", routing_key="*.*.*.image")
        # load model
        print("Start loading model")
        #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
        #model.eval()
        print("Load model successfully")

    def __callback(self, ch, method, properties, body):
        if "PersonDetector" in method.routing_key:
            pass  # ignore messages this service published itself
        else:
            routing_key_tokens = method.routing_key.split(".")
            app_name = routing_key_tokens[0]
            client_id = routing_key_tokens[1]
            # preprocessing
            img_bytes = np.frombuffer(body, dtype=np.uint8)
            img = cv2.imdecode(img_bytes, 1)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cvtColor returns a new array, so keep the result
            img = img.astype(np.float32) / 255.0
            img = torch.tensor(img)
            img = img.permute(2, 0, 1)
            # detect
            pred = model([img])
            pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
            pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
            pred_score = list(pred[0]['scores'].detach().numpy())
            # keep only detections with score > 0.7 (scores come back sorted in descending order)
            confident = [pred_score.index(x) for x in pred_score if x > 0.7]
            if not confident:
                return
            pred_t = confident[-1]
            pred_boxes = pred_boxes[:pred_t+1]
            pred_class = pred_class[:pred_t+1]
            for cls in pred_class:
                if cls == "person":
                    print("person detect!!!")

    def run(self):
        self.channel.basic_consume(queue='PersonDetector_input_image', on_message_callback=self.__callback, auto_ack=True)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        self.channel.start_consuming()

if __name__ == '__main__':
    try:
        detector = PersonDetector()
        detector.run()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
==========
import pika
import pandas as pd
import numpy as np

connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.56.1', port=5672))
channel = connection.channel()

with open("C:\\Users\\Cherry\\Downloads\\image.jpg", "rb") as f:
    data = f.read()

channel.basic_publish(exchange='PersonDetector',
                      routing_key='PersonDetection.client0.null.image',
                      body=data)
connection.close()
print("finish")