Neptune API cheat sheet

This guide demonstrates the Neptune Python API purely through code examples.

Setup and imports
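The snippets assume both client packages are installed (assuming the standard PyPI package names: neptune-scale for logging, neptune-query for querying):

pip install neptune-scale neptune-query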

Create a project
from neptune_scale.projects import create_project


create_project(
    name="project-x",
    workspace="team-alpha",  # a workspace with this name must already exist
    visibility="workspace",  # default is private
    description="Team-wide sandbox project",
)
Logging API
from neptune_scale import Run
from neptune_scale.types import File


# Always use the Python "main guard" around Neptune code
if __name__ == "__main__":
    # Runs are units of experiments
    run = Run(...)
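
To make sure queued data is flushed when the script ends, the run can also be closed explicitly or used as a context manager (a minimal sketch, assuming Run supports the context-manager protocol):

if __name__ == "__main__":
    with Run(experiment_name="seabird-flying-skills") as run:
        run.log_configs({"learning_rate": 0.001})
    # the run is closed and remaining data flushed on exiting the block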
Query API
import neptune_query as nq
from neptune_query.filters import Filter, AttributeFilter

Run object

Create or continue an experiment
# Create an experiment run
run = Run(experiment_name="seabird-flying-skills")

# Create a detached run – best used for debugging purposes only
run = Run()

# You can pass Neptune credentials directly
run = Run(
    experiment_name="seabird-flying-skills",
    api_token="h0dHBzOi8aHR0cHM.4kl0jvYh3Kb8...ifQ==",
    project="team-alpha/project-x",
)

# Continue logging to an existing run
run = Run(
    run_id="likable-barracuda",  # run with this ID already exists
)

# Create a fork run that inherits metrics from the parent
run = Run(
    experiment_name="seabird-flying-skills",
    fork_run_id="likable-barracuda",  # parent run ID
    fork_step=100,
)

# Customize the run with other options
run = Run(
    experiment_name="seabird-flying-skills",
    mode="offline",  # logging mode
    log_directory="my_dir",  # data directory
    enable_console_log_capture=False,  # stderr and stdout
    runtime_namespace="infra",  # namespace for system and environment info
)
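
Alternatively, set the credentials as environment variables (the standard Neptune variable names) and omit them from the Run call:

export NEPTUNE_API_TOKEN="h0dHBzOi8aHR0cHM.4kl0jvYh3Kb8...ifQ=="
export NEPTUNE_PROJECT="team-alpha/project-x"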
Annotate and categorize the experiment run
# Add a description
run.log_configs({"sys/description": "First test with new v2.0 dataset"})
# You and other project contributors can also edit it in the web app

# Add tags
run.add_tags(tags=["tag1", "tag2", "tag3"])
# Add group labels
run.add_tags(group_tags=True, tags=["group1", "group2"])

# Remove tags
run.remove_tags(tags=["tag2"])
# Remove group labels
run.remove_tags(group_tags=True, tags=["group2"])
Archive a run
run.log_configs({"sys/archived": True})

# Unarchive
run.log_configs({"sys/archived": False})

Single values

Log single values
# Log single Boolean, integer, float, string, or datetime values
run.log_configs(
    {
        "use_preprocessing": True,
        "learning_rate": 0.001,
        "optimizer": "Adam",
    }
)

# To create a folder, nest the attributes under a common namespace
run.log_configs(
    {
        "config/use_preprocessing": True,
        "config/learning_rate": 0.001,
        "config/optimizer": "Adam",
    }
)

# If you write to the same attribute path, the previous value is overwritten
run.log_configs(
    {
        "config/optimizer": "Adadelta",  # overwrites "Adam"
    }
)

# Log other single values, like summary scores
run.log_configs(
    {
        "summary/scores/f1": 0.89,
        "summary/epoch_average_seconds": 48.2,
    },
)

# Log custom timestamps
from datetime import datetime

run.log_configs({"train/end": datetime.now()})

# Flatten nested structures and cast unsupported types to string
data = {
    "metrics": {
        "token_count": 76420,
        "agg": {
            "loss": None,
            "acc": None,
        },
    },
    "some_list": [1, "test", None],
}
run.log_configs(
    data=data,
    flatten=True,
    cast_unsupported=True,
)
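
For reference, a sketch of the attributes the call above would create (assumed flattening and casting behavior, not verified output):

# metrics/token_count -> 76420
# metrics/agg/loss    -> "None" (unsupported None cast to string)
# metrics/agg/acc     -> "None"
# some_list           -> "[1, 'test', None]" (unsupported list cast to string)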
Use single values in queries
# Fetch experiment metadata as table, with config attributes as columns
nq.fetch_experiments_table(attributes=r"config/")

# Fetch experiments with certain learning rate and return last metric values
lr_filter = Filter.eq("config/learning_rate", 0.001)
nq.fetch_experiments_table(
    experiments=lr_filter,
    attributes=r"metrics/train/",
)

Metrics

Log numerical series
# Log one metric per step
run.log_metrics(
    data={"metrics/loss": 0.14},
    step=1,
)

# Log multiple metrics per step
run.log_metrics(
    data={
        "metrics/loss": 0.12,
        "metrics/acc": 0.78,
    },
    step=2,
)

# Steps must increase within the same series
run.log_metrics(
    data={"metrics/loss": 0.13},
    step=1.5,  # Error – step 2 already logged
)

# A new series attribute can start from any step
run.log_metrics(
    data={"metrics/new": 0.1},
    step=0.5,  # OK
)

# Log previews of incomplete values and analyze them separately from regular points
run.log_metrics(
    data={"metrics/loss": 0.112},
    step=2.5,  # OK
    preview=True,
    preview_completion=0.8,
)

# Preview values can be out-of-order, as long as they come after the last regular step
run.log_metrics(
    data={"metrics/loss": 0.111},
    step=2.2,  # OK - greater than 2
    preview=True,
    preview_completion=0.4,
)
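
Put together, metric logging in a training loop typically looks like this (a minimal sketch; train_one_step and num_steps are hypothetical placeholders):

for step in range(1, num_steps + 1):
    loss, acc = train_one_step()  # hypothetical training function
    run.log_metrics(
        data={
            "metrics/loss": loss,
            "metrics/acc": acc,
        },
        step=step,
    )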
Fetch numerical series
# Fetch experiments table with 'metrics' attributes as columns
nq.fetch_experiments_table(attributes=r"metrics/")
# The last logged value is returned by default

# Define validation loss requirement for experiments
loss_filter = Filter.lt("metrics/val/loss", 0.1)
# Fetch training metrics of experiments that pass the loss filter
nq.fetch_metrics(
    experiments=loss_filter,
    attributes=r"metrics/train/",
)

# Customize the table of returned metric values
nq.fetch_metrics(
    experiments=loss_filter,
    attributes=r"metrics/train/",
    step_range=(1000.0, None),
    tail_limit=100,
    lineage_to_the_root=False,
    type_suffix_in_column_names=True,
)

# Include previews of incomplete points (logged with preview=True)
nq.fetch_metrics(
    experiments=loss_filter,
    attributes=r"metrics/train/",
    tail_limit=10,
    include_point_previews=True,
)
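
The fetching functions return a pandas DataFrame, so the results can be inspected with the usual tools (a sketch; the exact index and column layout depends on the query):

df = nq.fetch_metrics(
    experiments=loss_filter,
    attributes=r"metrics/train/",
)
print(df.head())   # first few rows
print(df.columns)  # one column per matched attribute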

Files

Upload single files
# Create one or more file attributes
run.assign_files(
    {
        "dataset/sample": "data/sample_data.csv",
        "dataset/labels": "data/labels.json",
        "dataset/processing_script": "scripts/processing.py",
    }
)

# Specify details about the file object
run.assign_files(
    {
        "files/file.txt": File(source=b"Hello world!", mime_type="text/plain"),
    }
)
Download single files
# Specify experiments with file attributes
interesting_files = nq.fetch_experiments_table(
    project="team-alpha/project-x",  # optional, can also be read from an environment variable
    experiments=["seagull-week1", "seagull-week2"],
    attributes=r"^dataset/",
)

nq.download_files(
    files=interesting_files,
    destination="npt_downloads/datasets",
)
Upload series of files per step
# Log a file per step
run.log_files(
    files={"predictions/train": "output/train/predictions.png"},
    step=1,
)

# Log multiple file series in a single call
run.log_files(
    files={
        "predictions/train": "output/train/predictions.png",
        "predictions/val": "output/val/predictions.png",
    },
    step=2,
)
# You can also use the File type, as for single file assignment
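
For example (mirroring the File usage shown earlier; the attribute path and contents are illustrative):

run.log_files(
    files={"predictions/summary": File(source=b"step summary", mime_type="text/plain")},
    step=3,
)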
Download series of files
# Specify experiments with file series attributes
interesting_files = nq.fetch_series(
    project="team-alpha/project-x",  # optional, can also be read from an environment variable
    experiments=["seagull-week1", "seagull-week2"],
    attributes=r"^predictions/",
    step_range=(2050.0, 2056.0),
)

nq.download_files(
    files=interesting_files,
    destination="npt_downloads/predictions",
)

Histograms

Define a histogram
from neptune_scale.types import Histogram


# Specify the bin edges (the ranges for the histogram bars)
bin_edges = [0, 1, 40, 89, 1000]

# Define a histogram through data-point counts per bin
neptune_histogram = Histogram(
    bin_edges=bin_edges,
    counts=[5, 82, 44, 1],
)

# or through densities
neptune_histogram = Histogram(
    bin_edges=bin_edges,
    densities=[0.25, 0.25, 0.25, 0.25],
)

# or use NumPy
import numpy as np

a = np.arange(5)
densities, bin_edges = np.histogram(a, density=True)
neptune_histogram = Histogram(bin_edges=bin_edges, densities=densities)
Log one or more histograms per step
for step in epoch:
    # your training loop

    my_histogram_1 = Histogram(...)
    my_histogram_2 = Histogram(...)
    my_histogram_3 = Histogram(...)

    run.log_histograms(
        histograms={
            "layers/1/activations": my_histogram_1,
            "layers/2/activations": my_histogram_2,
            "layers/3/activations": my_histogram_3,
        },
        step=step,
    )
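
A sketch of filling those histograms from real data with NumPy (layer_activations and num_steps are hypothetical placeholders):

for step in range(num_steps):
    acts = layer_activations(1)  # hypothetical: activations of layer 1
    densities, bin_edges = np.histogram(acts, bins=20, density=True)
    run.log_histograms(
        histograms={"layers/1/activations": Histogram(bin_edges=bin_edges, densities=densities)},
        step=step,
    )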
Fetch histogram series
nq.fetch_series(
    experiments=["training_week-15"],
    attributes=["layers/2/activations"],
    tail_limit=5,
)

Text

Single string
# Log a string
run.log_configs({"note": "Some text related to the run."})

# Query the notes later
nq.fetch_experiments_table(attributes=["note"])
Sequence of strings per step
# Log series of strings per step
for step in epoch:
    # your training loop

    run.log_string_series(
        data={
            "messages/errors": "Job failed",
            "messages/info": "Training completed",
        },
        step=step,  # steps must increase within each series
    )

# Query the messages later:

# A) One run per row, "messages" columns showing the last logged entry
nq.fetch_experiments_table(attributes=r"^messages/")

# B) All messages from a specific experiment, one step per row
nq.fetch_series(
    experiments=["training_week-15"],
    attributes=r"^messages/",
)

What's in a project

# List all experiments
nq.list_experiments()

# List experiments that match a regex pattern
nq.list_experiments(r"week-\d$")

# List all attributes
nq.list_attributes()

# List matching attributes
nq.list_attributes(
    experiments=r"week-\d$",
    attributes=r"metric.*|config/.*",
)
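
The listing functions pair naturally with the fetching ones; for example, the matched attribute names can be passed straight to a metrics query (a sketch, assuming fetch_metrics accepts a list of attribute names as the other fetching functions do):

attrs = nq.list_attributes(attributes=r"metric.*")
nq.fetch_metrics(
    experiments=r"week-\d$",
    attributes=attrs,
)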
Access runs instead of experiments
# Import the runs module
import neptune_query.runs as nq_runs


# Call the fetching method from runs
nq_runs.fetch_metrics(
    runs=r"^speedy-seagull.*_02",  # use the 'runs' argument to specify run IDs
    attributes=r"loss/.*",
)

Troubleshooting

On logging failure, raise an exception instead of silently dropping the data
export NEPTUNE_LOG_FAILURE_ACTION=raise
Set the logger level to the highest severity
export NEPTUNE_LOGGER_LEVEL=critical
Send an offline run to the server
neptune sync .neptune/team-alpha_project-x_likable-barracuda_174193/run_operations.sqlite3