Web Analytics Made Easy - Statcounter
Skip to content

Anosys Data Ingestion

Anosys Platform supports various ways to ingest your data, including vendor-specific integrations (e.g. OpenAI), vendor-agnostic integrations (e.g. Open Telemetry), or general tracking through API calls or HTTP pixels (POST and GET).

Integrating with OpenAI

OpenAI workflows can be integrated natively using our Python packages, where logging happens in the background using data that the OpenAI platform exposes. Alternatively, developers can implement custom logging using Python decorators or custom API calls.

Native OpenAI Integration

# Install the OpenAI SDK plus the AnoSys tracing/logging helper packages.
pip install openai
pip install traceAI-openai-agents
pip install anosys-logger-4-openai

import os  # required: this snippet reads/writes os.environ below

# NOTE(review): the snippet also relies on `userdata` (Colab secret storage),
# `AnosysOpenAILogger`, and `OpenAI` being imported by the surrounding
# example — confirm the full notebook includes those imports.

# The key is retrieved from the environment variables that Colab stores
# (click the key on the left toolbar for more details).
os.environ["OPENAI_API_KEY"] = userdata.get("OPENAI_API_KEY")
os.environ["ANOSYS_API_KEY"] = os.getenv("ANOSYS_API_KEY", "YOUR_ANOSYS_KEY")

# Installing the logger once makes subsequent OpenAI calls log in the background.
AnosysOpenAILogger()
client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="What is the population of New York City?"
)

print(response.output_text)

Custom Logging with Decorators

1
2
3
4
5
6
7
# Wrap your function with the anosys_logger decorator; each invocation is
# then logged under the given `source` label.

@anosys_logger(source="My function call")
def myfunction(param1=None, param2=None):
    """Demo function: format both parameters into a string (the decorator logs the call)."""
    return f"-={param1}-{param2}=-"

print("->",myfunction("custom", "logging"))

Open Telemetry Integration

# OTEL setup for traces + metrics + logs (OTLP/HTTP)

import os
import time
import random
import atexit
import logging
from typing import Optional, Dict

# --- OpenTelemetry core ---
from opentelemetry.sdk.resources import Resource
from opentelemetry.semconv.resource import ResourceAttributes

# Traces
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

# Metrics
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter

# Logs (logging signal is still under _logs in current releases)
from opentelemetry._logs import set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter


# --------------------------
# Config
# --------------------------
# Base OTLP/HTTP endpoint; override via the OTLP_BASE environment variable.
BACKEND_BASE = os.getenv(
    "OTLP_BASE",
    "https://api.anosys.ai/[your_unique_path]",  # fixed: literal was missing its closing quote
)
SERVICE_NAME = os.getenv("OTEL_SERVICE_NAME", "climate-otel")
SERVICE_VERSION = os.getenv("OTEL_SERVICE_VERSION", "1.0.0")
DEPLOY_ENV = os.getenv("OTEL_ENV", "dev")
HEADERS: Dict[str, str] = {}  # e.g. {"Authorization": "Bearer <token>"}

# Per-signal OTLP endpoints derived from the base URL.
TRACES_URL = f"{BACKEND_BASE}/v1/traces"
METRICS_URL = f"{BACKEND_BASE}/v1/metrics"
LOGS_URL = f"{BACKEND_BASE}/v1/logs"


# --------------------------
# Resource (shared by all signals)
# --------------------------
def build_resource() -> Resource:
    """Build the OTEL Resource shared by the trace, metric, and log providers."""
    attributes = {
        ResourceAttributes.SERVICE_NAME: SERVICE_NAME,
        ResourceAttributes.SERVICE_VERSION: SERVICE_VERSION,
        "deployment.environment": DEPLOY_ENV,
    }
    return Resource.create(attributes)


# --------------------------
# Traces
# --------------------------
def setup_tracing(resource: Resource) -> None:
    """Install a global TracerProvider that batches spans to the OTLP/HTTP endpoint."""
    span_exporter = OTLPSpanExporter(endpoint=TRACES_URL, headers=HEADERS)
    tracer_provider = TracerProvider(resource=resource)
    tracer_provider.add_span_processor(BatchSpanProcessor(span_exporter))
    trace.set_tracer_provider(tracer_provider)

    # Flush any buffered spans on process exit.
    atexit.register(tracer_provider.shutdown)


# --------------------------
# Metrics
# --------------------------
def setup_metrics(resource: Resource, export_interval_ms: int = 1000) -> None:
    """Install a global MeterProvider exporting metrics via OTLP/HTTP.

    export_interval_ms: how often the periodic reader pushes metrics, in ms.
    """
    otlp_exporter = OTLPMetricExporter(endpoint=METRICS_URL, headers=HEADERS)
    periodic_reader = PeriodicExportingMetricReader(
        otlp_exporter, export_interval_millis=export_interval_ms
    )
    meter_provider = MeterProvider(resource=resource, metric_readers=[periodic_reader])
    metrics.set_meter_provider(meter_provider)

    # Flush any buffered metrics on process exit.
    atexit.register(meter_provider.shutdown)


# --------------------------
# Logs
# --------------------------
def setup_logging(resource: Resource, level: int = logging.INFO) -> logging.Logger:
    """Wire stdlib logging to the OTEL logs pipeline and return the service logger."""
    otel_logger_provider = LoggerProvider(resource=resource)
    set_logger_provider(otel_logger_provider)

    otlp_exporter = OTLPLogExporter(endpoint=LOGS_URL, headers=HEADERS)
    otel_logger_provider.add_log_record_processor(BatchLogRecordProcessor(otlp_exporter))
    atexit.register(otel_logger_provider.shutdown)

    # Bridge stdlib logging -> OTEL; existing handlers are cleared so records
    # are emitted through the OTEL handler only.
    service_logger = logging.getLogger(SERVICE_NAME)
    service_logger.setLevel(level)
    service_logger.handlers.clear()
    service_logger.addHandler(
        LoggingHandler(level=level, logger_provider=otel_logger_provider)
    )
    return service_logger


# --------------------------
# Demo: generate some telemetry
# --------------------------
def demo_telemetry(iterations: int = 5, delay_sec: float = 1.0) -> None:
    """Emit one span, two metric points, and one log record per iteration."""
    tracer = trace.get_tracer(f"{SERVICE_NAME}.demo")
    meter = metrics.get_meter(f"{SERVICE_NAME}.demo")

    # Counter of demo requests processed.
    requests_counter = meter.create_counter(
        name="demo.requests",
        description="Number of demo requests processed",
        unit="{request}",
    )

    # Histogram of simulated latency (for a gauge-like measure, use an
    # ObservableGauge instead).
    latency_hist = meter.create_histogram(
        name="demo.request_latency_ms",
        description="Simulated request latency in ms",
        unit="ms",
    )

    # Stdlib logger — bridged to OTEL by setup_logging().
    logger = logging.getLogger(SERVICE_NAME)

    for iteration in range(iterations):
        with tracer.start_as_current_span("demo-operation") as span:
            # ---- Trace ----
            span.set_attribute("iteration", iteration)
            span.set_attribute("work.kind", "demo")
            simulated_latency_ms = random.randint(20, 200)  # fake latency value
            time.sleep(delay_sec)  # simulate doing some "work"

            # ---- Metrics ----
            requests_counter.add(1, {"route": "/demo", "status_code": 200})
            latency_hist.record(simulated_latency_ms, {"route": "/demo"})

            # ---- Logs ----
            logger.info(
                "Processed demo request",
                extra={"iteration": iteration, "latency_ms": simulated_latency_ms},
            )

        print(f"sent: trace+metric+log iteration={iteration} latency_ms={simulated_latency_ms}")


def main() -> None:
    """Configure all three OTEL signals, then emit the demo telemetry."""
    shared_resource = build_resource()
    setup_tracing(shared_resource)
    setup_metrics(shared_resource, export_interval_ms=1000)
    setup_logging(shared_resource, level=logging.DEBUG)
    demo_telemetry()


if __name__ == "__main__":
    main()

Custom API Calls

import requests

# Your unique AnoSys ingestion path
url = "https://api.anosys.ai/ingestion/your_unique_path"

# Query parameters: s1 is a string variable, n1 a numeric one (example value).
query = {
    "s1": "string1",
    "n1": 123.45,
}

try:
    resp = requests.get(url, params=query, timeout=10)
    # A 4xx/5xx status becomes an exception handled below.
    resp.raise_for_status()
    print("Success:", resp.text)
except requests.exceptions.RequestException as err:
    print("Error:", err)

JavaScript

1
2
3
4
5
6
7
8
9
<!-- Sample code for AnoSys https://anosys.ai -->
<!-- Set the project identifier before loading the stats script. -->
<script type="text/javascript">
var anosys_project="your_project_id"; 
</script>

<script async type="text/javascript"
src="https://api.anosys.ai/webstats.js" ></script>
<!-- Fallback 0x0 pixel for visitors with JavaScript disabled. -->
<noscript><div class="anosys"><img src='https://api.anosys.ai/ingestion/[your_unique_path]' referrerPolicy="no-referrer-when-downgrade" width="0" height="0"></div></noscript>
<!-- End of AnoSys Code -->
<!-- Sample code for AnoSys https://anosys.ai -->
<script type="text/javascript">
// Placeholder function definitions

// Replace these with your actual data retrieval functions
function getIP() {
  return '192.168.1.1';
}

function getTimestamp() {
  return Date.now();
}

function isSTB() {
  return false;
}

// Custom variables; the cvs/cvn/cvb prefixes match the assigned value types
// here (string, numeric, boolean) — presumably the naming convention AnoSys
// expects; verify against the platform docs.
var anosys_project="your_project_id"; 
var anosys_cvs1 = getIP();
var anosys_cvn1 = getTimestamp();
var anosys_cvb1 = isSTB();
</script>
<script async type="text/javascript"
src="https://api.anosys.ai/customstats.js" ></script>
<!-- Fallback 0x0 pixel for visitors with JavaScript disabled. -->
<noscript><div class="anosys"><img src='https://api.anosys.ai/ingestion/[your_unique_path]' referrerPolicy="no-referrer-when-downgrade" width="0" height="0"></div></noscript>
<!-- End of AnoSys Code -->

HTTP POST/GET

Image Pixels

Image (0x0) pixels can be used for tracking traffic to websites, or mobile apps. They can be piggybacked with custom variables (e.g. s1=value, s2=value, etc.)
Augmenting your application with 0x0 tracking pixels further enhances visibility, since you can correlate technical metrics with topline metrics (e.g. how users engage with your product).

<!-- 0x0 tracking pixel. The query string must start with "?" (the original
     example used "&", which would make s1/s2 part of the path, not parameters);
     subsequent variables are joined with "&". -->
<img src='https://api.anosys.ai/ingestion/[your_unique_path]/anosys.gif?s1=value&s2=value' width="0" height="0">