Modified Ollama.nix to match the nixpkgs module as closely as possible

This commit is contained in:
Alexandre 2025-12-27 00:47:39 +01:00
parent 3109427d49
commit 3dfcb41244

View File

@ -1,23 +1,48 @@
# src: https://github.com/nix-darwin/nix-darwin/pull/972
#
# nix-darwin module that runs the Ollama server as a launchd user agent and
# optionally pre-pulls a list of models via a second, one-shot agent.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib) types;
  cfg = config.services.ollama;
  # Absolute path to the `ollama` binary of the configured package.
  ollama = lib.getExe cfg.package;
in
{
  options = {
    services.ollama = {
      enable = lib.mkEnableOption "ollama server for local large language models";

      package = lib.mkPackageOption pkgs "ollama" {
        example = "pkgs.ollama";
        extraDescription = ''
          On macOS, hardware acceleration is automatically handled by Metal.
          The standard `ollama` package should work for most users.
        '';
      };

      home = lib.mkOption {
        type = types.str;
        default = "$HOME/.ollama";
        example = "/Users/foo/.ollama";
        description = ''
          The home directory that the ollama service uses for its data.
          On macOS, this defaults to the user's home directory.
        '';
      };

      models = lib.mkOption {
        type = types.str;
        default = "${cfg.home}/models";
        defaultText = "\${config.services.ollama.home}/models";
        example = "/Users/foo/.ollama/models";
        description = ''
          The directory that the ollama service will read models from and download new models to.
        '';
      };

      host = lib.mkOption {
        type = types.str;
        default = "127.0.0.1";
        example = "0.0.0.0";
        description = ''
          The host address which the ollama server HTTP interface listens to.
        '';
      };

      port = lib.mkOption {
        type = types.port;
        default = 11434;
        example = 11111;
        description = ''
          Which port the ollama server listens to.
        '';
      };

      environmentVariables = lib.mkOption {
        type = types.attrsOf types.str;
        default = { };
        example = {
          OLLAMA_DEBUG = "1";
          OLLAMA_NUM_PARALLEL = "2";
        };
        description = ''
          Set arbitrary environment variables for the ollama service.
          Be aware that these are only seen by the ollama server (launchd daemon),
          not normal invocations like `ollama run`.
          Since `ollama run` is mostly a shell around the ollama server, this is usually sufficient.
        '';
      };

      loadModels = lib.mkOption {
        type = types.listOf types.str;
        # Drop empty entries so the loader script never runs `ollama pull ""`.
        apply = builtins.filter (model: model != "");
        default = [ ];
        example = [
          "llama3.2"
          "qwen2.5:7b"
          "deepseek-r1:8b"
        ];
        description = ''
          Download these models using `ollama pull` after the service starts.
          This creates a separate LaunchAgent that will pull the models in the background.
          Models are only pulled if not already present.
          Search for models at: <https://ollama.com/library>
        '';
      };
    };
  };

  config = lib.mkIf cfg.enable {
    # Make the ollama CLI available on the system PATH.
    environment.systemPackages = [ cfg.package ];

    # Main ollama server agent: starts at login and is kept alive permanently.
    launchd.user.agents.ollama = {
      path = [ config.environment.systemPath ];
      serviceConfig = {
        KeepAlive = true;
        RunAtLoad = true;
        ProgramArguments = [
          "${ollama}"
          "serve"
        ];
        EnvironmentVariables = cfg.environmentVariables // {
          # Listen address and model directory for the daemon.
          OLLAMA_HOST = "${cfg.host}:${toString cfg.port}";
          OLLAMA_MODELS = cfg.models;
        };
        # NOTE(review): launchd does not perform shell expansion on these paths,
        # so with the default `home` value ("$HOME/.ollama") the literal string
        # "$HOME" ends up in the log path — confirm `home` is set to an absolute
        # path in practice.
        StandardOutPath = "${cfg.home}/ollama.log";
        StandardErrorPath = "${cfg.home}/ollama-error.log";
      };
    };

    # One-shot agent that pulls the requested models in the background.
    launchd.user.agents.ollama-model-loader = lib.mkIf (cfg.loadModels != [ ]) {
      path = [ config.environment.systemPath ];
      serviceConfig = {
        # Run once at login; do not restart after the pulls finish.
        KeepAlive = false;
        RunAtLoad = true;
        ProgramArguments = [
          "${pkgs.bash}/bin/bash"
          "-c"
          ''
            # Wait until the ollama server answers before pulling models.
            for i in {1..30}; do
              if ${ollama} list &>/dev/null; then
                break
              fi
              echo "Waiting for ollama service to be ready... ($i/30)"
              sleep 2
            done

            # Pull each model unless it is already present.
            # `escapeShellArg` already single-quotes the value, so it must not be
            # wrapped in additional double quotes; `-F` makes grep match the
            # model name literally instead of as a regex.
            ${lib.concatMapStringsSep "\n" (model: ''
              if ! ${ollama} list | grep -qF ${lib.escapeShellArg model}; then
                echo "Pulling model: ${model}"
                ${ollama} pull ${lib.escapeShellArg model}
              else
                echo "Model already exists: ${model}"
              fi
            '') cfg.loadModels}
          ''
        ];
        StandardOutPath = "${cfg.home}/model-loader.log";
        StandardErrorPath = "${cfg.home}/model-loader-error.log";
      };
    };

    # Informational message during activation. The enclosing `config` attrset is
    # already gated on `cfg.enable`, so no extra `mkIf` is needed here.
    system.activationScripts.postActivation.text = ''
      echo "Ollama service configured to listen on ${cfg.host}:${toString cfg.port}"
      echo "Models will be stored in: ${cfg.models}"
      ${lib.optionalString (cfg.loadModels != [ ]) ''
        echo "The following models will be downloaded automatically:"
        ${lib.concatMapStringsSep "\n" (model: ''echo "  - ${model}"'') cfg.loadModels}
      ''}
    '';
  };

  meta.maintainers = with lib.maintainers; [ ];
}