Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on an 80GB GPU.

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = lora_model,  # YOUR MODEL
    max_seq_length = 2048,    # adjust to the context length you need
    load_in_4bit = True,      # 4-bit quantization to reduce VRAM use
)
```
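To illustrate the long-context claim above, here is a minimal sketch of loading Llama 3.3 (70B) with an extended context window; the model name `unsloth/Llama-3.3-70B-Instruct-bnb-4bit` and the 89K `max_seq_length` value are assumptions for illustration, not taken from the snippet above.

```python
from unsloth import FastLanguageModel

# Hypothetical long-context load: the model name and sequence length are assumptions.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/Llama-3.3-70B-Instruct-bnb-4bit",
    max_seq_length = 89_000,  # ~89K tokens, per the context-length claim above
    load_in_4bit = True,      # 4-bit quantization helps fit the 70B model on an 80GB GPU
)
FastLanguageModel.for_inference(model)  # switch to Unsloth's faster inference mode
```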