diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py
index 2e3761f5..a2a02d7e 100644
--- a/unsloth/chat_templates.py
+++ b/unsloth/chat_templates.py
@@ -528,6 +528,7 @@ def get_chat_template(
         chat_template, stop_word = chat_template
         assert(type(chat_template) is str)
         assert(type(stop_word) is str)
+        ollama_modelfile = None
 
     elif type(chat_template) is str:
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py
index f2f79de8..3d969d7d 100644
--- a/unsloth/models/llama.py
+++ b/unsloth/models/llama.py
@@ -1423,9 +1423,38 @@ def get_peft_model(
         transformers_set_seed(random_state)
 
         if isinstance(model, PeftModelForCausalLM):
-            raise TypeError(
-                "Unsloth: Your model already has LoRA adapters. No need to run this again!"
+            # Check if exactly the same and then pass through!
+            assert(hasattr(model, "peft_config"))
+
+            peft_config = model.peft_config["default"].to_dict()
+            check_parameters = [
+                "r", "lora_alpha", "lora_dropout",
+                "bias", "layers_to_transform", "layers_pattern",
+                "use_rslora", "modules_to_save", "init_lora_weights",
+            ]
+            check_all = True
+            for param in check_parameters:
+                check_all = check_all and (peft_config[param] == eval(param))
+            pass
+            check_all = check_all and (
+                len(set(peft_config["target_modules"]) ^ set(target_modules)) == 0
             )
+            check_all = check_all and (
+                (loftq_config == {} or loftq_config is None) and \
+                (peft_config["loftq_config"] == {} or peft_config["loftq_config"] is None)
+            )
+
+            if check_all:
+                # Simply pass through!
+                logger.warning(
+                    "Unsloth: Already have LoRA adapters! We shall skip this step."
+                )
+                return model
+            else:
+                raise TypeError(
+                    "Unsloth: Your model already has LoRA adapters. Your new parameters are different."
+                )
+            pass
         pass
 
         if loftq_config is None: loftq_config = {}
diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py
index de1e2e57..d7c0f076 100644
--- a/unsloth/models/loader.py
+++ b/unsloth/models/loader.py
@@ -91,21 +91,37 @@ def from_pretrained(
         model_name = _get_model_name(model_name, load_in_4bit)
 
         # First check if it's a normal model via AutoConfig
-        is_peft = False
         try:
             model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision)
-            is_peft = False
+            is_model = True
+        except:
+            is_model = False
+        try:
+            peft_config = PeftConfig.from_pretrained(model_name, token = token, revision = revision)
+            is_peft = True
         except:
-            try:
-                # Most likely a PEFT model
-                peft_config = PeftConfig.from_pretrained(model_name, token = token, revision = revision)
-            except:
-                raise RuntimeError(f"Unsloth: `{model_name}` is not a full model or a PEFT model.")
-
+            is_peft = False
+
+        # Cannot be both!
+        if is_model and is_peft:
+            raise RuntimeError(
+                "Unsloth: Your repo has a LoRA adapter and a base model.\n"\
+                "You have 2 files `config.json` and `adapter_config.json`.\n"\
+                "We must only allow one config file.\n"\
+                "Please separate the LoRA and base models to 2 repos."
+            )
+        elif not is_model and not is_peft:
+            raise RuntimeError(
+                f"Unsloth: `{model_name}` is not a base model or a PEFT model.\n"\
+                "We could not locate a `config.json` or `adapter_config.json` file"
+            )
+        pass
+
+        # Get base model for PEFT:
+        if is_peft:
             # Check base model again for PEFT
             model_name = _get_model_name(peft_config.base_model_name_or_path, load_in_4bit)
-            model_config = AutoConfig.from_pretrained(model_name, token = token)
-            is_peft = True
+            model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision)
         pass
 
         model_type = model_config.model_type
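A minimal usage sketch of the new `get_peft_model` behaviour (the model name and LoRA settings below are illustrative placeholders, not part of this diff): a second call with identical parameters now logs a warning and returns the existing model instead of raising, while differing parameters still raise a TypeError.

    from unsloth import FastLanguageModel

    model, tokenizer = FastLanguageModel.from_pretrained(
        "unsloth/mistral-7b-bnb-4bit",  # placeholder base model for illustration
        load_in_4bit = True,
    )
    lora_kwargs = dict(
        r = 16, lora_alpha = 16, lora_dropout = 0,
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"],
    )
    model = FastLanguageModel.get_peft_model(model, **lora_kwargs)
    # Identical second call: warns "Already have LoRA adapters!" and returns the same model.
    model = FastLanguageModel.get_peft_model(model, **lora_kwargs)
    # Changing any checked parameter (e.g. r = 32) still raises TypeError.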