Neptune API cheat sheet
This page demonstrates the Neptune Python API purely through code examples.
Setup and imports
Create a project
from neptune_scale.projects import create_project
create_project(
name="project-x",
workspace="team-alpha", # a workspace with this name must already exist
visibility="workspace", # default is private
description="Team-wide sandbox project",
)
Logging API
from neptune_scale import Run
from neptune_scale.types import File
# Always use the Python "main guard" around Neptune code
if __name__ == "__main__":
# Runs are units of experiments
run = Run(...)
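Putting it together, a minimal logging flow could look like the sketch below; the attribute names and values are illustrative.
if __name__ == "__main__":
    run = Run(experiment_name="seabird-flying-skills")
    run.log_configs({"config/learning_rate": 0.001})
    for step in range(1, 11):
        # Placeholder loss values standing in for real training results
        run.log_metrics(data={"metrics/loss": 1.0 / step}, step=step)
    run.close()  # flush queued data and end the run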
Fetching API
import neptune_fetcher.alpha as npt
from neptune_fetcher.alpha import Context
from neptune_fetcher.alpha.filters import Filter, AttributeFilter
# Set the context explicitly if it differs from, or isn't set by, the environment variables
npt.set_context(Context(project="team-alpha/project-x", api_token="SomeOtherToken"))
# Set the project only
npt.set_project("team-alpha/project-y")
# Target a project without changing the global context
another_project = npt.get_context().with_project("team-beta/project-y")
npt.list_experiments(experiments=r"week-\d$", context=another_project)
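If you don't set a context explicitly, the fetcher falls back to environment variables. A sketch of setting them from Python, assuming the standard NEPTUNE_PROJECT and NEPTUNE_API_TOKEN variable names:
import os
# Assumed variable names; set these before calling the fetching methods
os.environ["NEPTUNE_PROJECT"] = "team-alpha/project-x"
os.environ["NEPTUNE_API_TOKEN"] = "YourNeptuneApiToken"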
Run object
Create or continue an experiment
# Create an experiment run
run = Run(experiment_name="seabird-flying-skills")
# Create a detached run – best used for debugging purposes only
run = Run()
# You can pass Neptune credentials directly
run = Run(
experiment_name="seabird-flying-skills",
api_token="h0dHBzOi8aHR0cHM.4kl0jvYh3Kb8...ifQ==",
project="team-alpha/project-x",
)
# Continue logging to an existing run
run = Run(
run_id="likable-barracuda", # existing run ID
resume=True,
)
# Create a fork run that inherits metrics from the parent
run = Run(
experiment_name="seabird-flying-skills",
fork_run_id="likable-barracuda", # parent run ID
fork_step=100,
)
# Customize the run with other options
run = Run(
experiment_name="seabird-flying-skills",
mode="offline", # logging mode
log_directory="my_dir", # data directory
enable_console_log_capture=False, # stderr and stdout
runtime_namespace="infra", # namespace for system and environment info
)
Annotate the run
# Add a description
run.log_configs({"sys/description": "First test with new v2.0 dataset"})
# You and other project contributors can also edit it in the web app
# Add tags
run.add_tags(tags=["tag1", "tag2", "tag3"])
# Remove tags
run.remove_tags(tags=["tag2"])
Archive a run
run.log_configs({"sys/archived": True})
# Unarchive
run.log_configs({"sys/archived": False})
Single values
Log single values
# Log single Boolean, integer, float, string, or datetime values
run.log_configs(
{
"use_preprocessing": True,
"learning_rate": 0.001,
"optimizer": "Adam",
}
)
# To create a folder, nest the attributes under a common namespace
run.log_configs(
{
"config/use_preprocessing": True,
"config/learning_rate": 0.001,
"config/optimizer": "Adam",
}
)
# If you write to the same attribute path, the previous value is overwritten
run.log_configs(
{
"config/optimizer": "Adadelta", # overwrites "Adam"
}
)
# Log other single values, like summary scores
run.log_configs(
{
"summary/scores/f1": 0.89,
"summary/epoch_average_seconds": 48.2,
},
)
# Log custom timestamps
from datetime import datetime
run.log_configs({"train/end": datetime.now()})
Fetch single values
# Fetch all attributes and filter experiments by learning rate
lr_filter = Filter.eq("config/learning_rate", 0.001)
npt.fetch_experiments_table(experiments=lr_filter)
# Fetch all experiments with config attributes as columns
npt.fetch_experiments_table(attributes=r"config/")
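The fetching methods return tabular results; assuming the return value is a pandas DataFrame, you can post-process it as usual:
df = npt.fetch_experiments_table(experiments=lr_filter, attributes=r"config/")
df.to_csv("experiments.csv")  # assumes a pandas DataFrame return value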
Metrics
Log numerical series
# Log one metric per step
run.log_metrics(
data={"metrics/loss": 0.14},
step=1,
)
# Log multiple metrics per step
run.log_metrics(
data={
"metrics/loss": 0.12,
"metrics/acc": 0.78,
},
step=2,
)
# Steps must increase within the same series
run.log_metrics(
data={"metrics/loss": 0.13},
step=1.5, # Error – step 2 already logged
)
# A new series attribute can start from any step
run.log_metrics(
data={"metrics/new": 0.1},
step=0.5, # OK
)
# Log previews of incomplete values and analyze them separately from regular points
run.log_metrics(
data={"metrics/loss": 0.112},
step=2.5, # OK
preview=True,
preview_completion=0.8,
)
# Preview values can be out-of-order, as long as they come after the last regular step
run.log_metrics(
data={"metrics/loss": 0.111},
step=2.2, # OK - greater than 2
preview=True,
preview_completion=0.4,
)
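In practice, the step usually comes from the training loop itself. A sketch, with placeholder values standing in for real metrics:
for step in range(1, 101):
    loss = 1.0 / step  # placeholder for the real training loss
    run.log_metrics(
        data={"metrics/loss": loss, "metrics/acc": 1.0 - loss},
        step=step,
    )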
Fetch numerical series
# Fetch experiments table with 'metrics' attributes as columns
npt.fetch_experiments_table(attributes=r"metrics/")
# By default, the last logged value of each metric is used as the column value
# Use aggregations other than the last value
max_and_var = AttributeFilter(
name_eq=r"metrics/",
aggregations=["max", "variance"],
)
npt.fetch_experiments_table(attributes=max_and_var)
# Fetch experiments that pass the loss filter and specify attributes to use as columns
loss_filter = Filter.lt("metrics/val/loss", 0.1)
npt.fetch_experiments_table(
experiments=loss_filter,
attributes=r"config/",
)
# Fetch experiments that pass the loss filter and return metric values per step
npt.fetch_metrics(experiments=loss_filter)
# Customize the table of metric values
npt.fetch_metrics(
experiments=loss_filter,
attributes=r"config/",
step_range=(1000.0, None),
tail_limit=100,
lineage_to_the_root=False,
type_suffix_in_column_names=True,
)
# Include previews of incomplete points (logged with preview=True)
npt.fetch_metrics(
experiments=loss_filter,
attributes=r"config/",
include_point_previews=True,
)
Files
Upload single files
# Create one or more file attributes
run.assign_files(
{
"dataset/sample": "data/sample_data.csv",
"dataset/labels": "data/labels.json",
"dataset/processing_script": "scripts/processing.py",
}
)
# Specify details about the file object
run.assign_files(
{
"files/file.txt": File(source=b"Hello world!", mime_type="text/plain"),
}
)
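The File type also accepts content created in memory, so you can upload artifacts without writing them to disk first. A sketch that builds a JSON summary (the attribute path and values are illustrative):
import json

report = json.dumps({"f1": 0.89, "epochs": 20}).encode("utf-8")
run.assign_files(
    {
        "summary/report": File(source=report, mime_type="application/json"),
    }
)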
Download single files
# Download files from a particular experiment
npt.download_files(experiments="finicky-grebe-week-6")
# Download files from matching experiments and specify a destination
npt.download_files(
experiments=r"week-6$",
attributes=r"dataset/",
destination="npt_downloads",
)
# Download all files from the project – supported for files uploaded with assign_files()
npt.download_files()
Upload series of files per step
# Log a file per step
run.log_files(
files={"predictions/train": "output/train/predictions.png"},
step=1,
)
# Log multiple file series in a single call
run.log_files(
files={
"predictions/train": "output/train/predictions.png",
"predictions/val": "output/val/predictions.png",
},
step=2,
)
# You can also use the File type, as for single file assignment
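For example, a sketch following the File usage shown earlier:
run.log_files(
    files={
        "predictions/notes": File(source=b"epoch 3 notes", mime_type="text/plain"),
    },
    step=3,
)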
Histograms
Define a histogram
from neptune_scale.types import Histogram
# Specify the bin edges (the ranges for the histogram bars)
bin_edges = [0, 1, 40, 89, 1000]
# Define a histogram through data-point counts per bin
neptune_histogram = Histogram(
bin_edges=bin_edges,
counts=[5, 82, 44, 1],
)
# or through densities
neptune_histogram = Histogram(
bin_edges=bin_edges,
densities=[0.25, 0.25, 0.25, 0.25],
)
# or use NumPy
import numpy as np

a = np.arange(5)
densities, bin_edges = np.histogram(a, density=True)
neptune_histogram = Histogram(bin_edges=bin_edges, densities=densities)
Log one or more histograms per step
for step in epoch:
# your training loop
my_histogram_1 = Histogram(...)
my_histogram_2 = Histogram(...)
my_histogram_3 = Histogram(...)
run.log_histograms(
histograms={
"layers/1/activations": my_histogram_1,
"layers/2/activations": my_histogram_2,
"layers/3/activations": my_histogram_3,
},
step=step,
)
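You can also build the histograms directly from NumPy arrays inside the loop. A sketch with random data standing in for real activations:
import numpy as np

for step in range(10):
    activations = np.random.randn(1000)  # placeholder for real layer activations
    counts, bin_edges = np.histogram(activations, bins=20)
    run.log_histograms(
        histograms={"layers/1/activations": Histogram(bin_edges=bin_edges, counts=counts)},
        step=step,
    )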
Text
# Log a string
run.log_configs({"text": "Some text related to the run."})
# Log series of strings per step
for step in epoch:
# your training loop
run.log_string_series(
data={
"messages/errors": "Job failed",
"messages/info": "Training completed",
},
step=step,
)
What's in a project
# List all experiments
npt.list_experiments()
# List experiments that match a regex pattern
npt.list_experiments(r"week-\d$")
# List all attributes
npt.list_attributes()
# List matching attributes
npt.list_attributes(
experiments=r"week-\d$",
attributes=r"metric.*|config/.*",
)
Access runs instead of experiments
# Import the runs module
from neptune_fetcher.alpha import runs
# Call the fetching method from runs
runs.fetch_metrics(
runs=r"^speedy-seagull.*_02", # use the 'runs' argument to specify run IDs
attributes=r"loss/.*",
)
Troubleshooting
On logging failure, raise an exception instead of silently dropping the data
export NEPTUNE_LOG_FAILURE_ACTION=raise
Set the logger level to the highest severity
export NEPTUNE_LOGGER_LEVEL=critical
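If you prefer to configure this from Python, a sketch that sets the same variables before the run is created:
import os

# Set these before the Run is created so they take effect
os.environ["NEPTUNE_LOG_FAILURE_ACTION"] = "raise"
os.environ["NEPTUNE_LOGGER_LEVEL"] = "critical"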
Send an offline run to the server
neptune sync .neptune/team-alpha_project-x_likable-barracuda_174193/run_operations.sqlite3