Use this pre-trained Architectural Diagram computer vision model to retrieve predictions with our hosted API or deploy to the edge. Learn More About Roboflow Inference
Upload Image or Video File
Paste YouTube or Image URL
Confidence Threshold: 50
0%
100%
Overlap Threshold: 50
0%
100%
Inference is Roboflow's open source deployment package for developer-friendly vision inference.
Using Roboflow, you can deploy your object detection model to a range of environments, including:
Below, we have instructions on how to use our deployment options.
To install dependencies, run `pip install inference-sdk`.
Then, add the following code snippet to a Python script:
# Run hosted inference against the architectural-diagram model via
# Roboflow's inference-sdk Python client.
from inference_sdk import InferenceHTTPClient

# Client pointed at Roboflow's hosted detection endpoint.
CLIENT = InferenceHTTPClient(
    api_url="https://detect.roboflow.com",
    api_key="API_KEY"  # your Roboflow API key
)

# FIX: the image path must be a string literal; the original passed the
# bare identifier `your_image.jpg`, which is a syntax error.
result = CLIENT.infer("your_image.jpg", model_id="architectural-diagram/3")
Retrieving JSON predictions for a local file called YOUR_IMAGE.jpg:
# Base64-encode a local image and pipe it as the POST body (-d @- reads
# the request body from stdin) to the Roboflow hosted detect endpoint.
base64 YOUR_IMAGE.jpg | curl -d @- \
"https://detect.roboflow.com/architectural-diagram/3?api_key=API_KEY"
Inferring on an image hosted elsewhere on the web via its URL (don't forget to URL encode it):
# POST with no body: the image is fetched server-side from the URL given
# in the `image` query parameter (which must be URL-encoded).
curl -X POST "https://detect.roboflow.com/architectural-diagram/3?\
api_key=API_KEY&\
image=https://source.roboflow.com/bZ6vBtacs4UCPhjAiuqAoKwKtwI2/V7lHqlhZiaJhrDEQRNh0/original.jpg"
You will need to install curl for Windows and GNU's base64 tool for Windows. The easiest way to do this is to use the git for Windows installer which also includes the curl and base64 command line tools when you select "Use Git and optional Unix tools from the Command Prompt" during installation.
Then you can use the same commands as above.
We're using axios to perform the POST request in this example so first run npm install axios to install the dependency.
// Read a local image as base64 and POST it to the Roboflow hosted
// inference API for the architectural-diagram model (version 3).
const axios = require("axios");
const fs = require("fs");

const imageBase64 = fs.readFileSync("YOUR_IMAGE.jpg", { encoding: "base64" });

axios
  .post("https://detect.roboflow.com/architectural-diagram/3", imageBase64, {
    params: { api_key: "API_KEY" },
    headers: { "Content-Type": "application/x-www-form-urlencoded" }
  })
  .then((response) => {
    console.log(response.data);
  })
  .catch((error) => {
    console.log(error.message);
  });
// Infer on an image hosted elsewhere: the server fetches the image from
// the `image` query parameter, so no request body is needed.
const axios = require("axios");

const requestConfig = {
  method: "POST",
  url: "https://detect.roboflow.com/architectural-diagram/3",
  params: {
    api_key: "API_KEY",
    image: "IMAGE_URL"
  }
};

axios(requestConfig)
  .then((response) => console.log(response.data))
  .catch((error) => console.log(error.message));
We have realtime on-device inference available via inferencejs; see the documentation here.
This will load your model to run realtime inference directly in your users' web-browser using WebGL instead of passing images to the server-side.
Note: you shouldn't expose your Roboflow API key in the front-end to users outside of your organization.
This snippet should either use your users' API key (for example, if you're building model assisted labeling into your own labeling tool) or be put behind authentication so it's only usable by users who already have access to your Roboflow workspace.
// Browser-side inference: encode a user-selected file as a base64 data
// URL with FileReader, then POST it to the Roboflow hosted API.
import axios from 'axios';

// Wrap FileReader's callback API in a Promise so it can be awaited.
const loadImageBase64 = (file) =>
  new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve(reader.result);
    reader.onerror = (error) => reject(error);
    reader.readAsDataURL(file);
  });

const image = await loadImageBase64(fileData);

axios
  .post("https://detect.roboflow.com/architectural-diagram/3", image, {
    params: { api_key: "API_KEY" },
    headers: { "Content-Type": "application/x-www-form-urlencoded" }
  })
  .then((response) => console.log(response.data))
  .catch((error) => console.log(error.message));
import UIKit

// Base64-encode a local image and POST it to the Roboflow hosted
// inference API for the architectural-diagram model (version 3).
let image = UIImage(named: "your-image-path") // path to image to upload ex: image.jpg
let fileContent = image?.jpegData(compressionQuality: 1)?.base64EncodedString()
let postData = fileContent!.data(using: .utf8)

// Build the POST request: model endpoint with API key and file name as
// query parameters; the base64 payload travels as a form-urlencoded body.
var request = URLRequest(
    url: URL(string: "https://detect.roboflow.com/architectural-diagram/3?api_key=API_KEY&name=YOUR_IMAGE.jpg")!,
    timeoutInterval: Double.infinity
)
request.httpMethod = "POST"
request.addValue("application/x-www-form-urlencoded", forHTTPHeaderField: "Content-Type")
request.httpBody = postData

// Execute the request and print the raw JSON response.
URLSession.shared.dataTask(with: request) { data, response, error in
    guard let data = data else {
        print(String(describing: error))
        return
    }
    // Parse the response into a dictionary (shown for reference).
    do {
        let dict = try JSONSerialization.jsonObject(with: data, options: []) as? [String: Any]
        _ = dict
    } catch {
        print(error.localizedDescription)
    }
    // Print the raw response string.
    print(String(data: data, encoding: .utf8)!)
}.resume()
using System;
using System.IO;
using System.Net;
using System.Text;
namespace UploadLocal
{
    /// <summary>
    /// Uploads a local image to a Roboflow dataset: the file is
    /// base64-encoded and POSTed as a form-urlencoded body to the
    /// dataset upload endpoint.
    /// </summary>
    class UploadLocal
    {
        static void Main(string[] args)
        {
            // Base64-encode the image; the API expects the base64 text
            // itself (as ASCII bytes) in the request body.
            string encoded = Convert.ToBase64String(File.ReadAllBytes(@"YOUR_IMAGE.jpg"));
            byte[] payload = Encoding.ASCII.GetBytes(encoded);

            string api_key = "API_KEY"; // Your API Key
            string DATASET_NAME = "architectural-diagram"; // Set Dataset Name (Found in Dataset URL)

            // Upload endpoint with API key, destination file name, and split.
            string uploadURL =
                $"https://api.roboflow.com/dataset/{DATASET_NAME}/upload" +
                $"?api_key={api_key}&name=YOUR_IMAGE.jpg&split=train";

            // Require TLS 1.2 for the HTTPS call.
            ServicePointManager.Expect100Continue = true;
            ServicePointManager.SecurityProtocol = SecurityProtocolType.Tls12;

            // Configure the POST request.
            WebRequest request = WebRequest.Create(uploadURL);
            request.Method = "POST";
            request.ContentType = "application/x-www-form-urlencoded";
            request.ContentLength = payload.Length;

            // Write the base64 payload to the request body.
            using (Stream requestStream = request.GetRequestStream())
            {
                requestStream.Write(payload, 0, payload.Length);
            }

            // Read the full response body as a string.
            string responseContent;
            using (WebResponse response = request.GetResponse())
            using (Stream responseStream = response.GetResponseStream())
            using (StreamReader reader = new StreamReader(responseStream))
            {
                responseContent = reader.ReadToEnd();
            }

            Console.WriteLine(responseContent);
        }
    }
}
using System;
using System.IO;
using System.Net;
using System.Text;
namespace InferenceLocal
{
    /// <summary>
    /// Base64-encodes a local image and POSTs it to the Roboflow hosted
    /// inference endpoint, then prints the JSON prediction response.
    /// </summary>
    class InferenceLocal
    {
        static void Main(string[] args)
        {
            // Base64-encode the image; the API expects the base64 text
            // itself (as ASCII bytes) in the request body.
            byte[] imageArray = System.IO.File.ReadAllBytes(@"YOUR_IMAGE.jpg");
            string encoded = Convert.ToBase64String(imageArray);
            byte[] data = Encoding.ASCII.GetBytes(encoded);

            string api_key = "API_KEY"; // Your API Key
            string model_endpoint = "architectural-diagram/3"; // Set model endpoint

            // Construct the URL.
            // FIX: the original referenced an undeclared identifier `API_KEY`
            // here; the declared variable is `api_key`, so the snippet did
            // not compile.
            string uploadURL =
                "https://detect.roboflow.com/" + model_endpoint + "?api_key=" + api_key
                + "&name=YOUR_IMAGE.jpg";

            // Require TLS 1.2 for the HTTPS call.
            ServicePointManager.Expect100Continue = true;
            ServicePointManager.SecurityProtocol = SecurityProtocolType.Tls12;

            // Configure the POST request.
            WebRequest request = WebRequest.Create(uploadURL);
            request.Method = "POST";
            request.ContentType = "application/x-www-form-urlencoded";
            request.ContentLength = data.Length;

            // Write the base64 payload to the request body.
            using (Stream stream = request.GetRequestStream())
            {
                stream.Write(data, 0, data.Length);
            }

            // Read the full response body as a string.
            string responseContent = null;
            using (WebResponse response = request.GetResponse())
            {
                using (Stream stream = response.GetResponseStream())
                {
                    using (StreamReader sr99 = new StreamReader(stream))
                    {
                        responseContent = sr99.ReadToEnd();
                    }
                }
            }
            Console.WriteLine(responseContent);
        }
    }
}
using System;
using System.IO;
using System.Net;
using System.Web;
namespace InferenceHosted
{
    /// <summary>
    /// Runs inference on an image hosted at a public URL: the image URL
    /// is passed (URL-encoded) as a query parameter and fetched
    /// server-side, so the POST carries no body.
    /// </summary>
    class InferenceHosted
    {
        static void Main(string[] args)
        {
            // FIX: placeholders made consistent with the other snippets in
            // this guide — the original used an empty API key and the
            // invalid endpoint "dataset/v".
            string api_key = "API_KEY"; // Your API Key
            string imageURL = "https://i.ibb.co/jzr27x0/YOUR-IMAGE.jpg";
            string model_endpoint = "architectural-diagram/3"; // Set model endpoint

            // Construct the URL; the image URL must be URL-encoded.
            string uploadURL =
                "https://detect.roboflow.com/" + model_endpoint
                + "?api_key=" + api_key
                + "&image=" + HttpUtility.UrlEncode(imageURL);

            // Require TLS 1.2 for the HTTPS call.
            ServicePointManager.Expect100Continue = true;
            ServicePointManager.SecurityProtocol = SecurityProtocolType.Tls12;

            // Configure the POST request; no body, so ContentLength is 0.
            WebRequest request = WebRequest.Create(uploadURL);
            request.Method = "POST";
            request.ContentType = "application/x-www-form-urlencoded";
            request.ContentLength = 0;

            // Read the full response body as a string.
            string responseContent = null;
            using (WebResponse response = request.GetResponse())
            {
                using (Stream stream = response.GetResponseStream())
                {
                    using (StreamReader sr99 = new StreamReader(stream))
                    {
                        responseContent = sr99.ReadToEnd();
                    }
                }
            }
            Console.WriteLine(responseContent);
        }
    }
}
Export and use this model to create custom lenses within Snap AR's Lens Studio. Read More
Look through our full documentation for more information and resources on how to utilize this model.
Use this model with a full fledged web application that has all sample code included.
Perform inference at the edge with a Jetson via our Docker container.
Utilize your model on your mobile device.