diff --git a/experimental/ssh/internal/client/client.go b/experimental/ssh/internal/client/client.go index c196e7d9aa..5d9ca6cd24 100644 --- a/experimental/ssh/internal/client/client.go +++ b/experimental/ssh/internal/client/client.go @@ -20,6 +20,7 @@ import ( "github.com/databricks/cli/experimental/ssh/internal/keys" "github.com/databricks/cli/experimental/ssh/internal/proxy" + "github.com/databricks/cli/experimental/ssh/internal/sessions" "github.com/databricks/cli/experimental/ssh/internal/sshconfig" "github.com/databricks/cli/experimental/ssh/internal/vscode" sshWorkspace "github.com/databricks/cli/experimental/ssh/internal/workspace" @@ -99,11 +100,11 @@ type ClientOptions struct { } func (o *ClientOptions) Validate() error { - if !o.ProxyMode && o.ClusterID == "" && o.ConnectionName == "" { - return errors.New("please provide --cluster flag with the cluster ID, or --name flag with the connection name (for serverless compute)") + if !o.ProxyMode && o.ClusterID == "" && o.ConnectionName == "" && o.Accelerator == "" { + return errors.New("please provide --cluster or --accelerator flag") } - if o.Accelerator != "" && o.ConnectionName == "" { - return errors.New("--accelerator flag can only be used with serverless compute (--name flag)") + if o.Accelerator != "" && o.ClusterID != "" { + return errors.New("--accelerator flag can only be used with serverless compute, not with --cluster") } // TODO: Remove when we add support for serverless CPU if o.ConnectionName != "" && o.Accelerator == "" { @@ -122,7 +123,7 @@ func (o *ClientOptions) Validate() error { } func (o *ClientOptions) IsServerlessMode() bool { - return o.ClusterID == "" && o.ConnectionName != "" + return o.ClusterID == "" && (o.ConnectionName != "" || o.Accelerator != "") } // SessionIdentifier returns the unique identifier for the session. 
@@ -202,9 +203,17 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt cancel() }() + // For serverless without explicit --name: auto-generate or reconnect to existing session. + if opts.IsServerlessMode() && opts.ConnectionName == "" && !opts.ProxyMode { + err := resolveServerlessSession(ctx, client, &opts) + if err != nil { + return err + } + } + sessionID := opts.SessionIdentifier() if sessionID == "" { - return errors.New("either --cluster or --name must be provided") + return errors.New("either --cluster or --accelerator must be provided") } if !opts.ProxyMode { @@ -327,6 +336,20 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt cmdio.LogString(ctx, "Connected!") } + // Persist the session for future reconnects. + if opts.IsServerlessMode() && !opts.ProxyMode { + err = sessions.Add(ctx, sessions.Session{ + Name: opts.ConnectionName, + Accelerator: opts.Accelerator, + WorkspaceHost: client.Config.Host, + CreatedAt: time.Now(), + ClusterID: clusterID, + }) + if err != nil { + log.Warnf(ctx, "Failed to save session state: %v", err) + } + } + if opts.ProxyMode { return runSSHProxy(ctx, client, serverPort, clusterID, opts) } else if opts.IDE != "" { @@ -379,7 +402,12 @@ func ensureSSHConfigEntry(ctx context.Context, configPath, hostName, userName, k return fmt.Errorf("failed to generate ProxyCommand: %w", err) } - hostConfig := sshconfig.GenerateHostConfig(hostName, userName, keyPath, proxyCommand) + var hostConfig string + if opts.IsServerlessMode() { + hostConfig = sshconfig.GenerateServerlessHostConfig(hostName, userName, keyPath, proxyCommand) + } else { + hostConfig = sshconfig.GenerateHostConfig(hostName, userName, keyPath, proxyCommand) + } _, err = sshconfig.CreateOrUpdateHostConfig(ctx, hostName, hostConfig, true) if err != nil { @@ -547,15 +575,22 @@ func spawnSSHClient(ctx context.Context, userName, privateKeyPath string, server hostName := opts.SessionIdentifier() + hostKeyChecking := 
"StrictHostKeyChecking=accept-new" + if opts.IsServerlessMode() { + hostKeyChecking = "StrictHostKeyChecking=no" + } + sshArgs := []string{ "-l", userName, "-i", privateKeyPath, "-o", "IdentitiesOnly=yes", - "-o", "StrictHostKeyChecking=accept-new", + "-o", hostKeyChecking, "-o", "ConnectTimeout=360", "-o", "ProxyCommand=" + proxyCommand, } - if opts.UserKnownHostsFile != "" { + if opts.IsServerlessMode() { + sshArgs = append(sshArgs, "-o", "UserKnownHostsFile=/dev/null") + } else if opts.UserKnownHostsFile != "" { sshArgs = append(sshArgs, "-o", "UserKnownHostsFile="+opts.UserKnownHostsFile) } sshArgs = append(sshArgs, hostName) @@ -703,3 +738,97 @@ func ensureSSHServerIsRunning(ctx context.Context, client *databricks.WorkspaceC return userName, serverPort, effectiveClusterID, nil } + +// resolveServerlessSession handles auto-generation and reconnection for serverless sessions. +// It checks local state for existing sessions matching the workspace and accelerator, +// probes them to see if they're still alive, and prompts the user to reconnect or create new. +func resolveServerlessSession(ctx context.Context, client *databricks.WorkspaceClient, opts *ClientOptions) error { + version := build.GetInfo().Version + + matching, err := sessions.FindMatching(ctx, client.Config.Host, opts.Accelerator) + if err != nil { + log.Warnf(ctx, "Failed to load session state: %v", err) + } + + // Probe sessions to find alive ones (limit to 5 most recent to avoid latency). 
+ const maxProbe = 5 + if len(matching) > maxProbe { + matching = matching[len(matching)-maxProbe:] + } + + var alive []sessions.Session + for _, s := range matching { + _, _, _, probeErr := getServerMetadata(ctx, client, s.Name, s.ClusterID, version, opts.Liteswap) + if probeErr == nil { + alive = append(alive, s) + } else { + cleanupStaleSession(ctx, client, s, version) + } + } + + if len(alive) > 0 && cmdio.IsPromptSupported(ctx) { + choices := make([]string, 0, len(alive)+1) + for _, s := range alive { + choices = append(choices, fmt.Sprintf("Reconnect to %s (started %s)", s.Name, s.CreatedAt.Format(time.RFC822))) + } + choices = append(choices, "Create new session") + + choice, choiceErr := cmdio.AskSelect(ctx, "Found existing sessions:", choices) + if choiceErr != nil { + return fmt.Errorf("failed to prompt user: %w", choiceErr) + } + + for i, s := range alive { + if choice == choices[i] { + opts.ConnectionName = s.Name + cmdio.LogString(ctx, "Reconnecting to session: "+s.Name) + return nil + } + } + } + + // No alive session selected — generate a new name. + opts.ConnectionName = sessions.GenerateSessionName(opts.Accelerator) + cmdio.LogString(ctx, "Creating new session: "+opts.ConnectionName) + return nil +} + +// cleanupStaleSession removes all local and remote artifacts for a stale session. +func cleanupStaleSession(ctx context.Context, client *databricks.WorkspaceClient, s sessions.Session, version string) { + // Remove local SSH keys. + keyPath, err := keys.GetLocalSSHKeyPath(ctx, s.Name, "") + if err == nil { + os.RemoveAll(filepath.Dir(keyPath)) + } + + // Remove SSH config entry. + if err := sshconfig.RemoveHostConfig(ctx, s.Name); err != nil { + log.Debugf(ctx, "Failed to remove SSH config for %s: %v", s.Name, err) + } + + // Delete secret scope (best-effort). 
+ me, err := client.CurrentUser.Me(ctx) + if err == nil { + scopeName := fmt.Sprintf("%s-%s-ssh-tunnel-keys", me.UserName, s.Name) + deleteErr := client.Secrets.DeleteScope(ctx, workspace.DeleteScope{Scope: scopeName}) + if deleteErr != nil { + log.Debugf(ctx, "Failed to delete secret scope %s: %v", scopeName, deleteErr) + } + } + + // Remove workspace content directory (best-effort). + contentDir, err := sshWorkspace.GetWorkspaceContentDir(ctx, client, version, s.Name) + if err == nil { + deleteErr := client.Workspace.Delete(ctx, workspace.Delete{Path: contentDir, Recursive: true}) + if deleteErr != nil { + log.Debugf(ctx, "Failed to delete workspace content for %s: %v", s.Name, deleteErr) + } + } + + // Remove from local state. + if err := sessions.Remove(ctx, s.Name); err != nil { + log.Debugf(ctx, "Failed to remove session %s from state: %v", s.Name, err) + } + + log.Infof(ctx, "Cleaned up stale session: %s", s.Name) +} diff --git a/experimental/ssh/internal/client/client_test.go b/experimental/ssh/internal/client/client_test.go index 57df2fed2c..5821ab7b6c 100644 --- a/experimental/ssh/internal/client/client_test.go +++ b/experimental/ssh/internal/client/client_test.go @@ -18,9 +18,9 @@ func TestValidate(t *testing.T) { wantErr string }{ { - name: "no cluster or connection name", + name: "no cluster or connection name or accelerator", opts: client.ClientOptions{}, - wantErr: "please provide --cluster flag with the cluster ID, or --name flag with the connection name (for serverless compute)", + wantErr: "please provide --cluster or --accelerator flag", }, { name: "proxy mode skips cluster/name check", @@ -31,9 +31,13 @@ func TestValidate(t *testing.T) { opts: client.ClientOptions{ClusterID: "abc-123"}, }, { - name: "accelerator without connection name", + name: "accelerator with cluster ID", opts: client.ClientOptions{ClusterID: "abc-123", Accelerator: "GPU_1xA10"}, - wantErr: "--accelerator flag can only be used with serverless compute (--name flag)", + 
wantErr: "--accelerator flag can only be used with serverless compute, not with --cluster", + }, + { + name: "accelerator only (auto-generate session name)", + opts: client.ClientOptions{Accelerator: "GPU_1xA10"}, }, { name: "connection name without accelerator", @@ -55,8 +59,9 @@ func TestValidate(t *testing.T) { opts: client.ClientOptions{ConnectionName: "my-conn_1", Accelerator: "GPU_1xA10"}, }, { - name: "both cluster ID and connection name", - opts: client.ClientOptions{ClusterID: "abc-123", ConnectionName: "my-conn", Accelerator: "GPU_1xA10"}, + name: "both cluster ID and connection name (no accelerator)", + opts: client.ClientOptions{ClusterID: "abc-123", ConnectionName: "my-conn"}, + wantErr: "--name flag requires --accelerator to be set (for now we only support serverless GPU compute)", }, { name: "proxy mode with invalid connection name", diff --git a/experimental/ssh/internal/sessions/namegen.go b/experimental/ssh/internal/sessions/namegen.go new file mode 100644 index 0000000000..ab66a93eba --- /dev/null +++ b/experimental/ssh/internal/sessions/namegen.go @@ -0,0 +1,28 @@ +package sessions + +import ( + "crypto/rand" + "encoding/hex" + "strings" + "time" +) + +// acceleratorPrefixes maps known accelerator types to short human-readable prefixes. +var acceleratorPrefixes = map[string]string{ + "GPU_1xA10": "gpu-a10", + "GPU_8xH100": "gpu-h100", +} + +// GenerateSessionName creates a human-readable session name from the accelerator type. +// Format: databricks-<prefix>-<yyyymmdd>-<hex>, e.g. "databricks-gpu-a10-20260310-f3a2b1".
+func GenerateSessionName(accelerator string) string { + prefix, ok := acceleratorPrefixes[accelerator] + if !ok { + prefix = strings.ToLower(strings.ReplaceAll(accelerator, "_", "-")) + } + + date := time.Now().Format("20060102") + b := make([]byte, 3) + _, _ = rand.Read(b) + return "databricks-" + prefix + "-" + date + "-" + hex.EncodeToString(b) +} diff --git a/experimental/ssh/internal/sessions/sessions.go b/experimental/ssh/internal/sessions/sessions.go new file mode 100644 index 0000000000..15d93a4002 --- /dev/null +++ b/experimental/ssh/internal/sessions/sessions.go @@ -0,0 +1,147 @@ +package sessions + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/databricks/cli/libs/env" +) + +const ( + stateFileName = "ssh-tunnel-sessions.json" + + // Sessions older than this are considered expired and cleaned up automatically. + sessionMaxAge = 24 * time.Hour +) + +// Session represents a tracked SSH tunnel session. +type Session struct { + Name string `json:"name"` + Accelerator string `json:"accelerator"` + WorkspaceHost string `json:"workspace_host"` + CreatedAt time.Time `json:"created_at"` + ClusterID string `json:"cluster_id,omitempty"` +} + +// SessionStore holds all tracked sessions. +type SessionStore struct { + Sessions []Session `json:"sessions"` +} + +func getStateFilePath(ctx context.Context) (string, error) { + homeDir, err := env.UserHomeDir(ctx) + if err != nil { + return "", fmt.Errorf("failed to get home directory: %w", err) + } + return filepath.Join(homeDir, ".databricks", stateFileName), nil +} + +// Load reads the session store from disk. Returns an empty store if the file does not exist. 
+func Load(ctx context.Context) (*SessionStore, error) { + path, err := getStateFilePath(ctx) + if err != nil { + return nil, err + } + + data, err := os.ReadFile(path) + if os.IsNotExist(err) { + return &SessionStore{}, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read session state file: %w", err) + } + + var store SessionStore + if err := json.Unmarshal(data, &store); err != nil { + return nil, fmt.Errorf("failed to parse session state file: %w", err) + } + return &store, nil +} + +// Save writes the session store to disk atomically. +func Save(ctx context.Context, store *SessionStore) error { + path, err := getStateFilePath(ctx) + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return fmt.Errorf("failed to create state directory: %w", err) + } + + data, err := json.MarshalIndent(store, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal session state: %w", err) + } + + // Atomic write: write to temp file, then rename. + tmpPath := path + ".tmp" + if err := os.WriteFile(tmpPath, data, 0o600); err != nil { + return fmt.Errorf("failed to write session state file: %w", err) + } + if err := os.Rename(tmpPath, path); err != nil { + return fmt.Errorf("failed to rename session state file: %w", err) + } + return nil +} + +// Add persists a new session to the store, replacing any existing session with the same name. +func Add(ctx context.Context, s Session) error { + store, err := Load(ctx) + if err != nil { + return err + } + + // Replace existing session with the same name. + found := false + for i, existing := range store.Sessions { + if existing.Name == s.Name { + store.Sessions[i] = s + found = true + break + } + } + if !found { + store.Sessions = append(store.Sessions, s) + } + + return Save(ctx, store) +} + +// Remove deletes a session by name. 
+func Remove(ctx context.Context, name string) error { + store, err := Load(ctx) + if err != nil { + return err + } + + filtered := store.Sessions[:0] + for _, s := range store.Sessions { + if s.Name != name { + filtered = append(filtered, s) + } + } + store.Sessions = filtered + return Save(ctx, store) +} + +// FindMatching returns non-expired sessions that match the given workspace host and accelerator. +func FindMatching(ctx context.Context, workspaceHost, accelerator string) ([]Session, error) { + store, err := Load(ctx) + if err != nil { + return nil, err + } + + cutoff := time.Now().Add(-sessionMaxAge) + var result []Session + for _, s := range store.Sessions { + if s.WorkspaceHost == workspaceHost && s.Accelerator == accelerator && s.CreatedAt.After(cutoff) { + result = append(result, s) + } + } + return result, nil +} diff --git a/experimental/ssh/internal/sessions/sessions_test.go b/experimental/ssh/internal/sessions/sessions_test.go new file mode 100644 index 0000000000..9ccc82aa5e --- /dev/null +++ b/experimental/ssh/internal/sessions/sessions_test.go @@ -0,0 +1,183 @@ +package sessions + +import ( + "path/filepath" + "regexp" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadEmpty(t *testing.T) { + t.Setenv("HOME", t.TempDir()) + t.Setenv("USERPROFILE", t.TempDir()) + + store, err := Load(t.Context()) + require.NoError(t, err) + assert.Empty(t, store.Sessions) +} + +func TestSaveAndLoad(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + store := &SessionStore{ + Sessions: []Session{ + { + Name: "gpu-a10-abcd1234", + Accelerator: "GPU_1xA10", + WorkspaceHost: "https://test.databricks.com", + CreatedAt: time.Date(2026, 3, 10, 12, 0, 0, 0, time.UTC), + ClusterID: "0310-120000-abc", + }, + }, + } + + err := Save(t.Context(), store) + require.NoError(t, err) + + loaded, err := Load(t.Context()) + require.NoError(t, err) + 
require.Len(t, loaded.Sessions, 1) + assert.Equal(t, "gpu-a10-abcd1234", loaded.Sessions[0].Name) + assert.Equal(t, "GPU_1xA10", loaded.Sessions[0].Accelerator) + assert.Equal(t, "https://test.databricks.com", loaded.Sessions[0].WorkspaceHost) + assert.Equal(t, "0310-120000-abc", loaded.Sessions[0].ClusterID) +} + +func TestAddAndRemove(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + ctx := t.Context() + + err := Add(ctx, Session{Name: "sess-1", Accelerator: "GPU_1xA10", WorkspaceHost: "https://a.com"}) + require.NoError(t, err) + + err = Add(ctx, Session{Name: "sess-2", Accelerator: "GPU_8xH100", WorkspaceHost: "https://b.com"}) + require.NoError(t, err) + + store, err := Load(ctx) + require.NoError(t, err) + assert.Len(t, store.Sessions, 2) + + err = Remove(ctx, "sess-1") + require.NoError(t, err) + + store, err = Load(ctx) + require.NoError(t, err) + require.Len(t, store.Sessions, 1) + assert.Equal(t, "sess-2", store.Sessions[0].Name) +} + +func TestRemoveNonExistent(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + err := Remove(t.Context(), "no-such-session") + assert.NoError(t, err) +} + +func TestFindMatching(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + ctx := t.Context() + host := "https://test.databricks.com" + + now := time.Now() + + err := Add(ctx, Session{Name: "s1", Accelerator: "GPU_1xA10", WorkspaceHost: host, CreatedAt: now}) + require.NoError(t, err) + err = Add(ctx, Session{Name: "s2", Accelerator: "GPU_8xH100", WorkspaceHost: host, CreatedAt: now}) + require.NoError(t, err) + err = Add(ctx, Session{Name: "s3", Accelerator: "GPU_1xA10", WorkspaceHost: "https://other.com", CreatedAt: now}) + require.NoError(t, err) + err = Add(ctx, Session{Name: "s4", Accelerator: "GPU_1xA10", WorkspaceHost: host, CreatedAt: now}) + require.NoError(t, err) + + matches, err := FindMatching(ctx, 
host, "GPU_1xA10") + require.NoError(t, err) + assert.Len(t, matches, 2) + assert.Equal(t, "s1", matches[0].Name) + assert.Equal(t, "s4", matches[1].Name) + + matches, err = FindMatching(ctx, host, "GPU_8xH100") + require.NoError(t, err) + assert.Len(t, matches, 1) + assert.Equal(t, "s2", matches[0].Name) + + matches, err = FindMatching(ctx, host, "GPU_4xA100") + require.NoError(t, err) + assert.Empty(t, matches) +} + +func TestFindMatchingExpiresOldSessions(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + ctx := t.Context() + host := "https://test.databricks.com" + + err := Add(ctx, Session{Name: "old", Accelerator: "GPU_1xA10", WorkspaceHost: host, CreatedAt: time.Now().Add(-25 * time.Hour)}) + require.NoError(t, err) + err = Add(ctx, Session{Name: "recent", Accelerator: "GPU_1xA10", WorkspaceHost: host, CreatedAt: time.Now()}) + require.NoError(t, err) + + matches, err := FindMatching(ctx, host, "GPU_1xA10") + require.NoError(t, err) + require.Len(t, matches, 1) + assert.Equal(t, "recent", matches[0].Name) +} + +func TestStateFilePath(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("HOME", tmpDir) + t.Setenv("USERPROFILE", tmpDir) + + path, err := getStateFilePath(t.Context()) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tmpDir, ".databricks", stateFileName), path) +} + +// connectionNameRegex mirrors the regex in client.go. 
+var connectionNameRegex = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_-]*$`) + +func TestGenerateSessionName(t *testing.T) { + tests := []struct { + accelerator string + wantPrefix string + wantDatePrefix string + }{ + {"GPU_1xA10", "databricks-gpu-a10-", "databricks-gpu-a10-20"}, + {"GPU_8xH100", "databricks-gpu-h100-", "databricks-gpu-h100-20"}, + {"UNKNOWN_TYPE", "databricks-unknown-type-", "databricks-unknown-type-20"}, + } + + for _, tt := range tests { + t.Run(tt.accelerator, func(t *testing.T) { + name := GenerateSessionName(tt.accelerator) + assert.Greater(t, len(name), len(tt.wantPrefix), "name should be longer than prefix") + assert.Equal(t, tt.wantPrefix, name[:len(tt.wantPrefix)]) + // Verify date component is present (starts with "20" for 2000s dates). + assert.Equal(t, tt.wantDatePrefix, name[:len(tt.wantDatePrefix)]) + assert.True(t, connectionNameRegex.MatchString(name), "generated name %q must match connection name regex", name) + }) + } +} + +func TestGenerateSessionNameUniqueness(t *testing.T) { + seen := make(map[string]bool) + for range 100 { + name := GenerateSessionName("GPU_1xA10") + assert.False(t, seen[name], "duplicate name generated: %s", name) + seen[name] = true + } +} diff --git a/experimental/ssh/internal/sshconfig/sshconfig.go b/experimental/ssh/internal/sshconfig/sshconfig.go index f6886a4be9..2e37e3c1e1 100644 --- a/experimental/ssh/internal/sshconfig/sshconfig.go +++ b/experimental/ssh/internal/sshconfig/sshconfig.go @@ -160,14 +160,45 @@ func PromptRecreateConfig(ctx context.Context, hostName string) (bool, error) { return response, nil } +// RemoveHostConfig deletes the SSH config file for a given host name. +func RemoveHostConfig(ctx context.Context, hostName string) error { + configPath, err := GetHostConfigPath(ctx, hostName) + if err != nil { + return err + } + err = os.Remove(configPath) + if os.IsNotExist(err) { + return nil + } + return err +} + +// GenerateHostConfig generates an SSH host config block. 
func GenerateHostConfig(hostName, userName, identityFile, proxyCommand string) string { + return generateHostConfig(hostName, userName, identityFile, proxyCommand, false) +} + +// GenerateServerlessHostConfig generates an SSH host config block for serverless compute. +// It disables strict host key checking since serverless containers generate fresh keys each time, +// and identity is already verified through Databricks authentication and Driver Proxy. +func GenerateServerlessHostConfig(hostName, userName, identityFile, proxyCommand string) string { + return generateHostConfig(hostName, userName, identityFile, proxyCommand, true) +} + +func generateHostConfig(hostName, userName, identityFile, proxyCommand string, serverless bool) string { + hostKeyChecking := "StrictHostKeyChecking accept-new" + knownHostsLine := "" + if serverless { + hostKeyChecking = "StrictHostKeyChecking no" + knownHostsLine = " UserKnownHostsFile /dev/null\n" + } return fmt.Sprintf(` Host %s User %s ConnectTimeout 360 - StrictHostKeyChecking accept-new - IdentitiesOnly yes + %s +%s IdentitiesOnly yes IdentityFile %q ProxyCommand %s -`, hostName, userName, identityFile, proxyCommand) +`, hostName, userName, hostKeyChecking, knownHostsLine, identityFile, proxyCommand) }