Document Management Workflow
Efficiently search, filter, and download documents across your organization using the SingleFile API.
Search Documents by Criteria
Find specific documents using flexible filters like entity type, filing type, jurisdiction, and date ranges.
BASE_URL="https://api.demo.singlefile.io/external-api/v1"
ACCESS_TOKEN="your_bearer_token_here"
ORG_ID="your_organization_id"

# Search LLC formations created after a specific date
curl -G "$BASE_URL/organization/$ORG_ID/documents" \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d "page_size=100" \
  -d "ordering=-created_at" \
  -d "entity_type=llc" \
  -d "filing_type=formation" \
  -d "created_after=2024-01-01"

# Search all Delaware documents
curl -G "$BASE_URL/organization/$ORG_ID/documents" \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d "page_size=100" \
  -d "ordering=-created_at" \
  -d "jurisdiction=delaware"

import requests
def search_documents(organization_id, access_token, **filters):
    """Search documents with flexible filtering.

    Args:
        organization_id: SingleFile organization to search within.
        access_token: Bearer token for the Authorization header.
        **filters: Optional query parameters (e.g. entity_type, filing_type,
            jurisdiction, created_after); values of None are skipped.

    Returns:
        The "data" list from the API response, or [] on a non-200 status.
    """
    params = {
        "page_size": 100,
        "ordering": "-created_at",  # newest first
    }
    for key, value in filters.items():
        if value is not None:
            params[key] = value
    response = requests.get(
        f"https://api.demo.singlefile.io/external-api/v1/organization/{organization_id}/documents",
        params=params,
        headers={"Authorization": f"Bearer {access_token}"},
        timeout=30,  # requests has no default timeout; avoid hanging forever
    )
    if response.status_code == 200:
        return response.json()["data"]
    return []
ACCESS_TOKEN = "your_bearer_token_here"
recent_llc_formations = search_documents(
organization_id,
ACCESS_TOKEN,
entity_type="llc",
filing_type="formation",
created_after="2024-01-01"
)
delaware_documents = search_documents(
organization_id,
ACCESS_TOKEN,
jurisdiction="delaware"
)
print(f"Found {len(recent_llc_formations)} recent LLC formations")
print(f"Found {len(delaware_documents)} Delaware documents")const BASE_URL = "https://api.demo.singlefile.io/external-api/v1";
const ACCESS_TOKEN = "your_bearer_token_here";
async function searchDocuments(organizationId, filters = {}) {
const params = new URLSearchParams({
page_size: "100",
ordering: "-created_at",
...filters
});
const response = await fetch(
`${BASE_URL}/organization/${organizationId}/documents?${params}`,
{ headers: { "Authorization": `Bearer ${ACCESS_TOKEN}` } }
);
if (response.ok) {
const json = await response.json();
return json.data;
}
return [];
}
const recentLlcFormations = await searchDocuments(organizationId, {
entity_type: "llc",
filing_type: "formation",
created_after: "2024-01-01"
});
const delawareDocuments = await searchDocuments(organizationId, {
jurisdiction: "delaware"
});
console.log(`Found ${recentLlcFormations.length} recent LLC formations`);
console.log(`Found ${delawareDocuments.length} Delaware documents`);package main
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
)
const (
baseURL = "https://api.demo.singlefile.io/external-api/v1"
accessToken = "your_bearer_token_here"
)
type DocumentsResponse struct {
Data []map[string]interface{} `json:"data"`
}
func searchDocuments(orgID string, filters map[string]string) ([]map[string]interface{}, error) {
params := url.Values{
"page_size": {"100"},
"ordering": {"-created_at"},
}
for k, v := range filters {
params.Set(k, v)
}
req, _ := http.NewRequest("GET",
fmt.Sprintf("%s/organization/%s/documents?%s", baseURL, orgID, params.Encode()), nil)
req.Header.Set("Authorization", "Bearer "+accessToken)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result DocumentsResponse
json.NewDecoder(resp.Body).Decode(&result)
return result.Data, nil
}
func main() {
orgID := "your_organization_id"
formations, _ := searchDocuments(orgID, map[string]string{
"entity_type": "llc",
"filing_type": "formation",
"created_after": "2024-01-01",
})
fmt.Printf("Found %d recent LLC formations\n", len(formations))
delDocs, _ := searchDocuments(orgID, map[string]string{
"jurisdiction": "delaware",
})
fmt.Printf("Found %d Delaware documents\n", len(delDocs))
}using System.Net.Http.Json;
using System.Text.Json;
using System.Web;
const string BaseUrl = "https://api.demo.singlefile.io/external-api/v1";
const string AccessToken = "your_bearer_token_here";

// Reuse a single HttpClient for all requests (per-request clients exhaust sockets).
var httpClient = new HttpClient();
httpClient.DefaultRequestHeaders.Add("Authorization", $"Bearer {AccessToken}");

// Searches the organization's documents with optional query-string filters.
// Returns the response's "data" array, or an empty list on a non-success status.
async Task<List<JsonElement>> SearchDocuments(
    string organizationId, Dictionary<string, string>? filters = null)
{
    var query = HttpUtility.ParseQueryString(string.Empty);
    query["page_size"] = "100";
    query["ordering"] = "-created_at";   // newest first
    if (filters != null)
        foreach (var (key, value) in filters)
            query[key] = value;
    var url = $"{BaseUrl}/organization/{organizationId}/documents?{query}";
    // Dispose the response (the original leaked it).
    using var response = await httpClient.GetAsync(url);
    if (response.IsSuccessStatusCode)
    {
        var json = await response.Content.ReadFromJsonAsync<JsonDocument>();
        return json!.RootElement.GetProperty("data")
            .EnumerateArray().ToList();
    }
    return new List<JsonElement>();
}

var organizationId = "your_organization_id";

// Recent LLC formation filings.
var recentLlcFormations = await SearchDocuments(organizationId,
    new Dictionary<string, string>
    {
        ["entity_type"] = "llc",
        ["filing_type"] = "formation",
        ["created_after"] = "2024-01-01"
    });

// Every Delaware document.
var delawareDocuments = await SearchDocuments(organizationId,
    new Dictionary<string, string> { ["jurisdiction"] = "delaware" });

Console.WriteLine($"Found {recentLlcFormations.Count} recent LLC formations");
Console.WriteLine($"Found {delawareDocuments.Count} Delaware documents");

Bulk Document Download
Download multiple documents efficiently with organized naming.
# Download a single document by URL
curl -o "MyCompany_formation_2024-06-15.pdf" \
  "https://api.demo.singlefile.io/path/to/document.pdf"

# Bulk download using a search + loop
BASE_URL="https://api.demo.singlefile.io/external-api/v1"
ACCESS_TOKEN="your_bearer_token_here"
ORG_ID="your_organization_id"
DOWNLOAD_DIR="downloads"

mkdir -p "$DOWNLOAD_DIR"

# List matching documents, emit "<url> <filename>" pairs via jq, fetch each one.
# `read -r url filename` puts the first word in url and the rest (which may
# contain spaces from entity names) in filename.
curl -s -G "$BASE_URL/organization/$ORG_ID/documents" \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d "filing_type=formation" \
  -d "created_after=2024-01-01" \
  | jq -r '.data[] | "\(.document_url) \(.entity_name)_\(.document_type)_\(.created_at[:10]).pdf"' \
  | while read -r url filename; do
      curl -s -o "$DOWNLOAD_DIR/$filename" "$url"
      echo "Downloaded: $filename"
    done

import os
import requests
def download_document(document_url, filename, download_dir="downloads"):
    """Download a document to local storage.

    Streams the response to download_dir/filename in 8 KiB chunks, creating
    download_dir if needed.

    Returns:
        The local file path on success, or None if the request fails.
    """
    os.makedirs(download_dir, exist_ok=True)
    filepath = os.path.join(download_dir, filename)
    response = requests.get(document_url, stream=True, timeout=30)
    if response.status_code == 200:
        with open(filepath, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        # The extracted sample had a garbled "(unknown)" placeholder here;
        # report the filename as the shell/Go variants do.
        print(f"Downloaded: {filename}")
        return filepath
    else:
        print(f"Failed to download: {filename}")
        return None
def bulk_download_documents(documents, download_dir="downloads"):
    """Download multiple documents with organized naming.

    Each file is named "<entity>_<type>_<date>.pdf"; characters other than
    letters, digits, spaces, '-', '_' and '.' are stripped.

    Returns:
        The list of successfully downloaded file paths.
    """
    downloaded_files = []
    for doc in documents:
        entity_name = doc.get("entity_name", "unknown")
        doc_type = doc.get("document_type", "document")
        created_date = doc.get("created_at", "").split("T")[0]
        filename = f"{entity_name}_{doc_type}_{created_date}.pdf"
        # Keep '.' in the allowed set so the ".pdf" extension survives
        # sanitization (the original filter stripped it, as the JS variant's
        # regex shows was unintended).
        filename = "".join(
            c for c in filename if c.isalnum() or c in (" ", "-", "_", ".")
        ).rstrip()
        filepath = download_document(doc["document_url"], filename, download_dir)
        if filepath:
            downloaded_files.append(filepath)
    print(f"Downloaded {len(downloaded_files)} documents to {download_dir}")
    return downloaded_files
recent_formations = search_documents(
organization_id,
ACCESS_TOKEN,
filing_type="formation",
created_after="2024-01-01"
)
downloaded_files = bulk_download_documents(recent_formations)import { writeFile, mkdir } from "fs/promises";
import { join } from "path";
// Download one document to downloadDir/filename, creating downloadDir if needed.
// Resolves to the local path, or null if the request fails.
async function downloadDocument(documentUrl, filename, downloadDir = "downloads") {
  await mkdir(downloadDir, { recursive: true });
  const filepath = join(downloadDir, filename);
  const response = await fetch(documentUrl);
  if (response.ok) {
    const buffer = Buffer.from(await response.arrayBuffer());
    await writeFile(filepath, buffer);
    // The extracted sample had a garbled "$(unknown)" placeholder here;
    // report the filename as the other language variants do.
    console.log(`Downloaded: ${filename}`);
    return filepath;
  }
  console.log(`Failed to download: ${filename}`);
  return null;
}
// Download every document in `documents` into downloadDir, naming each file
// "<entity>_<type>_<date>.pdf" with unsafe characters stripped.
// Resolves to the paths of the downloads that succeeded.
async function bulkDownloadDocuments(documents, downloadDir = "downloads") {
  const downloadedFiles = [];
  for (const doc of documents) {
    const name = doc.entity_name ?? "unknown";
    const kind = doc.document_type ?? "document";
    const date = (doc.created_at ?? "").split("T")[0];
    const rawName = `${name}_${kind}_${date}.pdf`;
    const safeName = rawName.replace(/[^a-zA-Z0-9 \-_\.]/g, "");
    const savedPath = await downloadDocument(doc.document_url, safeName, downloadDir);
    if (savedPath) {
      downloadedFiles.push(savedPath);
    }
  }
  console.log(`Downloaded ${downloadedFiles.length} documents to ${downloadDir}`);
  return downloadedFiles;
}
const recentFormations = await searchDocuments(organizationId, {
filing_type: "formation",
created_after: "2024-01-01"
});
await bulkDownloadDocuments(recentFormations);package main
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
)
// downloadDocument fetches documentURL and writes the response body to
// downloadDir/filename, creating downloadDir if needed.
// Returns the local file path, or an error on any network, HTTP, or I/O
// failure (the original silently ignored MkdirAll/Create/Copy errors).
func downloadDocument(documentURL, filename, downloadDir string) (string, error) {
	if err := os.MkdirAll(downloadDir, 0755); err != nil {
		return "", err
	}
	fpath := filepath.Join(downloadDir, filename)
	resp, err := http.Get(documentURL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("failed to download: %s", filename)
	}
	out, err := os.Create(fpath)
	if err != nil {
		return "", err
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		return "", err
	}
	fmt.Printf("Downloaded: %s\n", filename)
	return fpath, nil
}
func bulkDownloadDocuments(documents []map[string]interface{}, downloadDir string) []string {
re := regexp.MustCompile(`[^a-zA-Z0-9 \-_]`)
var downloaded []string
for _, doc := range documents {
entityName, _ := doc["entity_name"].(string)
if entityName == "" {
entityName = "unknown"
}
docType, _ := doc["document_type"].(string)
if docType == "" {
docType = "document"
}
createdAt, _ := doc["created_at"].(string)
createdDate := strings.Split(createdAt, "T")[0]
filename := re.ReplaceAllString(
fmt.Sprintf("%s_%s_%s.pdf", entityName, docType, createdDate), "")
fpath, err := downloadDocument(
doc["document_url"].(string), filename, downloadDir)
if err == nil {
downloaded = append(downloaded, fpath)
}
}
fmt.Printf("Downloaded %d documents to %s\n", len(downloaded), downloadDir)
return downloaded
}using System.Text.Json;
using System.Text.RegularExpressions;
// Downloads one document to downloadDir/filename, creating downloadDir if needed.
// Returns the local path, or null if the request fails.
async Task<string?> DownloadDocument(
    string documentUrl, string filename, string downloadDir = "downloads")
{
    Directory.CreateDirectory(downloadDir);
    var filepath = Path.Combine(downloadDir, filename);
    using var response = await httpClient.GetAsync(documentUrl);
    if (response.IsSuccessStatusCode)
    {
        var bytes = await response.Content.ReadAsByteArrayAsync();
        await File.WriteAllBytesAsync(filepath, bytes);
        // The extracted sample had a garbled "(unknown)" placeholder here;
        // report the filename as the other language variants do.
        Console.WriteLine($"Downloaded: {filename}");
        return filepath;
    }
    Console.WriteLine($"Failed to download: {filename}");
    return null;
}
// Downloads every document in `documents` into downloadDir, naming each file
// "<entity>_<type>_<date>.pdf" with unsafe characters stripped.
// Returns the paths of the downloads that succeeded.
async Task<List<string>> BulkDownloadDocuments(
    List<JsonElement> documents, string downloadDir = "downloads")
{
    var downloadedFiles = new List<string>();
    // Keep '.' in the allowed set so the ".pdf" extension survives
    // sanitization (the previous pattern stripped it).
    var sanitize = new Regex(@"[^a-zA-Z0-9 \-_.]");
    foreach (var doc in documents)
    {
        var entityName = doc.TryGetProperty("entity_name", out var en)
            ? en.GetString() ?? "unknown" : "unknown";
        var docType = doc.TryGetProperty("document_type", out var dt)
            ? dt.GetString() ?? "document" : "document";
        var createdAt = doc.TryGetProperty("created_at", out var ca)
            ? ca.GetString() ?? "" : "";
        // "2024-06-15T10:00:00Z" -> "2024-06-15"
        var createdDate = createdAt.Split('T')[0];
        var filename = sanitize.Replace(
            $"{entityName}_{docType}_{createdDate}.pdf", "");
        var docUrl = doc.GetProperty("document_url").GetString()!;
        var filepath = await DownloadDocument(docUrl, filename, downloadDir);
        if (filepath != null)
            downloadedFiles.Add(filepath);
    }
    Console.WriteLine($"Downloaded {downloadedFiles.Count} documents to {downloadDir}");
    return downloadedFiles;
}
// Fetch recent formation filings and download them all.
var recentFormations = await SearchDocuments(organizationId,
    new Dictionary<string, string>
    {
        ["filing_type"] = "formation",
        ["created_after"] = "2024-01-01"
    });
await BulkDownloadDocuments(recentFormations);

Tip: For large document sets, consider downloading in parallel. In Python use
concurrent.futures.ThreadPoolExecutor, in JavaScript use Promise.all() with batching, in Go use goroutines with sync.WaitGroup, and in C# use Task.WhenAll() with SemaphoreSlim for concurrency control.
Updated about 1 month ago
