from .config import *
from .model import *

# from .model.tokenizer import *
from .configuration_olmo import OLMoConfig
from .modeling_olmo import OLMoForCausalLM, OLMoForSequenceCLS
from .tokenization_olmo_fast import OLMoTokenizerFast


def check_install(cuda: bool = False):
    """Verify the OLMo installation and, optionally, that CUDA is available."""
    import torch

    from .version import VERSION

    if cuda:
        assert torch.cuda.is_available(), "CUDA is not available!"
        print("CUDA available")
    print(f"OLMo v{VERSION} installed")