Commit 60fa201
alessandro trinca tornidor committed
Parent(s): 52318ec
[refactor] prepare packaging moving all the modules under 'lisa_on_cuda' (renamed from 'model')
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .idea/lisa-on-gpu.iml +1 -1
- .idea/misc.xml +1 -1
- {model → lisa_on_cuda}/LISA.py +0 -0
- {app → lisa_on_cuda}/__init__.py +0 -0
- {model → lisa_on_cuda/app}/__init__.py +0 -0
- {app → lisa_on_cuda/app}/chat.py +5 -5
- {app → lisa_on_cuda/app}/main.py +2 -2
- {app → lisa_on_cuda/app}/merge_lora_weights_and_save_hf_model.py +2 -2
- {app → lisa_on_cuda/app}/routes.py +1 -1
- {app → lisa_on_cuda/app}/train_ds.py +4 -4
- {model → lisa_on_cuda}/llava/__init__.py +0 -0
- {model → lisa_on_cuda}/llava/constants.py +0 -0
- {model → lisa_on_cuda}/llava/conversation.py +0 -0
- {model → lisa_on_cuda}/llava/mm_utils.py +0 -0
- {model → lisa_on_cuda}/llava/model/__init__.py +0 -0
- {model → lisa_on_cuda}/llava/model/apply_delta.py +2 -1
- {model → lisa_on_cuda}/llava/model/builder.py +5 -5
- {model → lisa_on_cuda}/llava/model/consolidate.py +1 -2
- {model → lisa_on_cuda}/llava/model/language_model/llava_llama.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/llava_mpt.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/adapt_tokenizer.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/attention.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/blocks.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/configuration_mpt.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/custom_embedding.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/flash_attn_triton.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/hf_prefixlm_converter.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/meta_init_context.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/modeling_mpt.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/norm.py +0 -0
- {model → lisa_on_cuda}/llava/model/language_model/mpt/param_init_fns.py +0 -0
- {model → lisa_on_cuda}/llava/model/llava_arch.py +1 -4
- {model → lisa_on_cuda}/llava/model/make_delta.py +1 -1
- {model → lisa_on_cuda}/llava/model/multimodal_encoder/builder.py +0 -0
- {model → lisa_on_cuda}/llava/model/multimodal_encoder/clip_encoder.py +0 -0
- {model → lisa_on_cuda}/llava/model/utils.py +0 -0
- {model → lisa_on_cuda}/llava/train/llama_flash_attn_monkey_patch.py +0 -0
- {model → lisa_on_cuda}/llava/train/llava_trainer.py +0 -0
- {model → lisa_on_cuda}/llava/train/train.py +0 -0
- {model → lisa_on_cuda}/llava/train/train_mem.py +3 -3
- {model → lisa_on_cuda}/llava/utils.py +1 -1
- {model → lisa_on_cuda}/segment_anything/__init__.py +0 -0
- {model → lisa_on_cuda}/segment_anything/automatic_mask_generator.py +0 -0
- {model → lisa_on_cuda}/segment_anything/build_sam.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/__init__.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/common.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/image_encoder.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/mask_decoder.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/prompt_encoder.py +0 -0
- {model → lisa_on_cuda}/segment_anything/modeling/sam.py +0 -0
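The list above shows the net effect of the refactor: the former top-level model and app directories now sit inside a single importable package, lisa_on_cuda, with the web app nested under lisa_on_cuda/app. As a hedged sketch (the old import paths are inferred from the renamed files; only the new ones appear verbatim in the diffs below), downstream code changes roughly like this:

# Before the refactor (inferred layout, shown only for comparison):
# from model.LISA import LISAForCausalLM
# from model.llava import conversation as conversation_lib
# from utils import app_helpers, utils

# After the refactor everything resolves through the lisa_on_cuda package;
# these lines are taken from the new version of lisa_on_cuda/app/chat.py:
from lisa_on_cuda.LISA import LISAForCausalLM
from lisa_on_cuda.llava import conversation as conversation_lib
from lisa_on_cuda.llava.mm_utils import tokenizer_image_token
from lisa_on_cuda.segment_anything.utils.transforms import ResizeLongestSide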
.idea/lisa-on-gpu.iml
CHANGED
@@ -4,7 +4,7 @@
     <content url="file://$MODULE_DIR$">
       <excludeFolder url="file://$MODULE_DIR$/venv" />
     </content>
-    <orderEntry type="
+    <orderEntry type="jdk" jdkName="Python 3.10 (lisa_on_cuda)" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
   <component name="PyDocumentationSettings">
.idea/misc.xml
CHANGED
@@ -3,5 +3,5 @@
   <component name="Black">
     <option name="sdkName" value="Python 3.10 (lisa-on-gpu)" />
   </component>
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (lisa_on_cuda)" project-jdk-type="Python SDK" />
 </project>
{model → lisa_on_cuda}/LISA.py
RENAMED
File without changes

{app → lisa_on_cuda}/__init__.py
RENAMED
File without changes

{model → lisa_on_cuda/app}/__init__.py
RENAMED
File without changes
{app → lisa_on_cuda/app}/chat.py
RENAMED
@@ -7,11 +7,11 @@ import numpy as np
 import torch
 from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
 
-from
-from
-from
-from
-from utils import app_helpers, utils
+from lisa_on_cuda.LISA import LISAForCausalLM
+from lisa_on_cuda.llava import conversation as conversation_lib
+from lisa_on_cuda.llava.mm_utils import tokenizer_image_token
+from lisa_on_cuda.segment_anything.utils.transforms import ResizeLongestSide
+from ..utils import app_helpers, utils
 
 
 def main(args):
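Because chat.py now mixes absolute package imports with a relative one (from ..utils import app_helpers, utils), it can no longer be run as a loose script from inside the app folder; it has to be imported through the package or launched with python -m. A minimal sketch, assuming the package is installed or on PYTHONPATH:

# Run it as a module so the '..utils' relative import has a parent package:
#   python -m lisa_on_cuda.app.chat <args>
# or import it programmatically:
from lisa_on_cuda.app import chat

# main(args) is visible in the diff; the exact arguments it expects are not,
# so the call below is only illustrative.
# chat.main(args)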
{app → lisa_on_cuda/app}/main.py
RENAMED
@@ -6,8 +6,8 @@ from fastapi import FastAPI
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 
-from
-from utils import app_helpers, session_logger
+from . import routes
+from ..utils import app_helpers, session_logger
 
 
 session_logger.change_logging(logging.DEBUG)
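The same applies to the FastAPI entry point: routes and shared utilities are now reached through package-relative imports, so the ASGI server has to be pointed at the fully qualified module path. A hedged example, assuming the FastAPI instance defined in main.py is named app (the variable name is not visible in this diff):

# Launch sketch (hypothetical app name, default uvicorn options):
#   uvicorn lisa_on_cuda.app.main:app
import logging

# Absolute form of the '..utils' import used by lisa_on_cuda/app/main.py:
from lisa_on_cuda.utils import session_logger

session_logger.change_logging(logging.DEBUG)  # mirrors the call shown above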
{app → lisa_on_cuda/app}/merge_lora_weights_and_save_hf_model.py
RENAMED
@@ -11,8 +11,8 @@ import transformers
 from peft import LoraConfig, get_peft_model
 from transformers import AutoTokenizer
 
-from
-from utils.utils import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN
+from lisa_on_cuda.LISA import LISAForCausalLM
+from ..utils.utils import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN
 
 
 def parse_args(args):
{app → lisa_on_cuda/app}/routes.py
RENAMED
@@ -2,7 +2,7 @@ import json
 import logging
 from fastapi import APIRouter
 
-from utils import session_logger
+from ..utils import session_logger
 
 
 router = APIRouter()
{app → lisa_on_cuda/app}/train_ds.py
RENAMED
@@ -13,10 +13,10 @@ import transformers
 from peft import LoraConfig, get_peft_model
 from torch.utils.tensorboard import SummaryWriter
 
-from
-from
-from utils.dataset import HybridDataset, ValDataset, collate_fn
-from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+from lisa_on_cuda.LISA import LISAForCausalLM
+from lisa_on_cuda.llava import conversation as conversation_lib
+from ..utils.dataset import HybridDataset, ValDataset, collate_fn
+from ..utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
                            AverageMeter, ProgressMeter, Summary, dict_to_cuda,
                            intersectionAndUnionGPU)
 
{model → lisa_on_cuda}/llava/__init__.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/constants.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/conversation.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/mm_utils.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/__init__.py
RENAMED
File without changes
{model → lisa_on_cuda}/llava/model/apply_delta.py
RENAMED
@@ -5,10 +5,11 @@ python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~
 import argparse
 
 import torch
-from llava import LlavaLlamaForCausalLM
 from tqdm import tqdm
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
+from .language_model.llava_llama import LlavaLlamaForCausalLM
+
 
 def apply_delta(base_model_path, target_model_path, delta_path):
     print("Loading base model")
{model → lisa_on_cuda}/llava/model/builder.py
RENAMED
@@ -17,11 +17,11 @@ import os
 import shutil
 
 import torch
-from
-
-from
-from
-
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+from .language_model.llava_llama import LlavaLlamaForCausalLM
+from .language_model.llava_mpt import LlavaMPTForCausalLM
+from ..constants import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_PATCH_TOKEN
 
 
 def load_pretrained_model(
{model → lisa_on_cuda}/llava/model/consolidate.py
RENAMED
@@ -5,8 +5,7 @@ python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_
 import argparse
 
 import torch
-from
-from llava.model.utils import auto_upgrade
+from .utils import auto_upgrade
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
{model → lisa_on_cuda}/llava/model/language_model/llava_llama.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/llava_mpt.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/adapt_tokenizer.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/attention.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/blocks.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/configuration_mpt.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/custom_embedding.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/flash_attn_triton.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/hf_prefixlm_converter.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/meta_init_context.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/modeling_mpt.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/norm.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/language_model/mpt/param_init_fns.py
RENAMED
File without changes
{model → lisa_on_cuda}/llava/model/llava_arch.py
RENAMED
@@ -18,10 +18,7 @@ from abc import ABC, abstractmethod
 import torch
 import torch.nn as nn
 
-
-from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
-                         DEFAULT_IMAGE_PATCH_TOKEN, IGNORE_INDEX,
-                         IMAGE_TOKEN_INDEX)
+from lisa_on_cuda.utils.utils import IGNORE_INDEX, IMAGE_TOKEN_INDEX
 
 from .multimodal_encoder.builder import build_vision_tower
 
{model → lisa_on_cuda}/llava/model/make_delta.py
RENAMED
@@ -5,7 +5,7 @@ python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/mod
 import argparse
 
 import torch
-from
+from .utils import auto_upgrade
 from tqdm import tqdm
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
{model → lisa_on_cuda}/llava/model/multimodal_encoder/builder.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/multimodal_encoder/clip_encoder.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/model/utils.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/train/llama_flash_attn_monkey_patch.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/train/llava_trainer.py
RENAMED
File without changes

{model → lisa_on_cuda}/llava/train/train.py
RENAMED
File without changes
{model → lisa_on_cuda}/llava/train/train_mem.py
RENAMED
@@ -3,12 +3,12 @@
 # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
 
 # Need to call this before importing transformers.
-from
-    replace_llama_attn_with_flash_attn
+from .llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
 
 replace_llama_attn_with_flash_attn()
 
-from
+from .train import train
+
 
 if __name__ == "__main__":
     train()
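The ordering in train_mem.py is deliberate: the FlashAttention monkey patch must replace the LLaMA attention implementation before any transformers-based model code is imported, which is why from .train import train only appears after the patch call. The same pattern, spelled out with the absolute module paths implied by the new layout:

# Patch first: swap the LLaMA attention forward for the FlashAttention version
# before anything that pulls in transformers model classes gets imported.
from lisa_on_cuda.llava.train.llama_flash_attn_monkey_patch import (
    replace_llama_attn_with_flash_attn,
)

replace_llama_attn_with_flash_attn()

# Only now import the training entry point, which loads transformers models.
from lisa_on_cuda.llava.train.train import train

if __name__ == "__main__":
    train()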
{model → lisa_on_cuda}/llava/utils.py
RENAMED
@@ -5,7 +5,7 @@ import os
 import sys
 
 import requests
-from
+from .constants import LOGDIR
 
 server_error_msg = (
     "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
{model → lisa_on_cuda}/segment_anything/__init__.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/automatic_mask_generator.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/build_sam.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/__init__.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/common.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/image_encoder.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/mask_decoder.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/prompt_encoder.py
RENAMED
File without changes

{model → lisa_on_cuda}/segment_anything/modeling/sam.py
RENAMED
File without changes
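With every module under one top-level package, the repository can be built and installed like a normal Python distribution, which is what the commit message ("prepare packaging") points at. No build configuration is part of this commit; the following setup.py is a purely hypothetical sketch of the next step:

# setup.py -- hypothetical packaging sketch, not included in this commit.
from setuptools import find_packages, setup

setup(
    name="lisa_on_cuda",
    version="0.0.1",  # placeholder
    packages=find_packages(include=["lisa_on_cuda", "lisa_on_cuda.*"]),
    python_requires=">=3.10",  # matches the interpreter referenced in the .idea files
)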