from .model.config import *
from .model.model import *
from .model.tokenizer import *
from .model.configuration_olmo import OLMoConfig
from .model.modeling_olmo import OLMoForCausalLM, OLMoForSequenceCLS
from .model.tokenization_olmo_fast import OLMoTokenizerFast


def check_install(cuda: bool = False):
    """Verify the OLMo installation and optionally check that CUDA is available."""
    import torch

    from .version import VERSION

    if cuda:
        assert torch.cuda.is_available(), "CUDA is not available!"
        print("CUDA available")
    print(f"OLMo v{VERSION} installed")
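

# Usage sketch for `check_install` (the top-level package name is not shown in
# this file, so `olmo` below is an assumption, not a confirmed import path):
#
#     from olmo import check_install
#     check_install(cuda=True)  # asserts torch.cuda.is_available(), then prints the version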