diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ee26633013d5..3cccd4af52dc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1960,7 +1960,7 @@ sessions remains deny-by-default but now only `join_sessions` statements are checked for session join RBAC. See the [Moderated Sessions -guide](docs/pages/admin-guides/access-controls/guides/moderated-sessions.mdx) for more +guide](docs/pages/admin-guides/access-controls/guides/joining-sessions.mdx) for more details. #### GitHub connectors @@ -2419,7 +2419,7 @@ With Moderated Sessions, Teleport administrators can define policies that allow users to invite other users to participate in SSH or Kubernetes sessions as observers, moderators or peers. -[Moderated Sessions guide](docs/pages/admin-guides/access-controls/guides/moderated-sessions.mdx) +[Moderated Sessions guide](docs/pages/admin-guides/access-controls/guides/joining-sessions.mdx) ### Breaking Changes diff --git a/api/constants/constants.go b/api/constants/constants.go index 9f66302da597d..420df3992cdb6 100644 --- a/api/constants/constants.go +++ b/api/constants/constants.go @@ -405,6 +405,10 @@ const ( // TraitHostUserGID is the name of the variable used to specify // the GID to create host user account with. TraitHostUserGID = "host_user_gid" + + // TraitGitHubOrgs is the name of the variable to specify the GitHub + // organizations for GitHub integration. + TraitGitHubOrgs = "github_orgs" ) const ( diff --git a/constants.go b/constants.go index 20f7257413cc4..301ea5ee733c0 100644 --- a/constants.go +++ b/constants.go @@ -642,6 +642,10 @@ const ( // TraitInternalJWTVariable is the variable used to store JWT token for // app sessions. TraitInternalJWTVariable = "{{internal.jwt}}" + + // TraitInternalGitHubOrgs is the variable used to store allowed GitHub + // organizations for GitHub integrations. + TraitInternalGitHubOrgs = "{{internal.github_orgs}}" ) // SCP is Secure Copy. diff --git a/docs/config.json b/docs/config.json index 9485a6085f5df..f8ff91ae673e6 100644 --- a/docs/config.json +++ b/docs/config.json @@ -214,6 +214,11 @@ } }, "redirects": [ + { + "source": "/admin-guides/access-controls/guides/moderated-sessions/", + "destination": "/admin-guides/access-controls/guides/joining-sessions/", + "permanent": true + }, { "source": "/reference/operator-resources/resources.teleport.dev_accesslists/", "destination": "/reference/operator-resources/resources-teleport-dev-accesslists/", diff --git a/docs/img/webui_billing_cycle.png b/docs/img/webui_billing_cycle.png new file mode 100644 index 0000000000000..bfa7b31704107 Binary files /dev/null and b/docs/img/webui_billing_cycle.png differ diff --git a/docs/pages/admin-guides/access-controls/guides/moderated-sessions.mdx b/docs/pages/admin-guides/access-controls/guides/joining-sessions.mdx similarity index 56% rename from docs/pages/admin-guides/access-controls/guides/moderated-sessions.mdx rename to docs/pages/admin-guides/access-controls/guides/joining-sessions.mdx index 29c2d4513275e..4a792fc6c9e67 100644 --- a/docs/pages/admin-guides/access-controls/guides/moderated-sessions.mdx +++ b/docs/pages/admin-guides/access-controls/guides/joining-sessions.mdx @@ -1,6 +1,6 @@ --- -title: Moderated Sessions -description: Describes the purpose of moderated sessions and how to configure roles to support moderated sessions in a Teleport cluster. +title: Joining Sessions +description: Describes shared sessions and how to configure roles to support joining sessions in a Teleport cluster. 
keywords:
- SSH
- Kubernetes
@@ -8,45 +8,144 @@ keywords:
- audit
---
-Moderated sessions allow you to define requirements for other users to be present
-in an active server or Kubernetes session started by another user. Depending on the
-requirements you specify, users who are allowed to join other users' sessions can be
-granted permission to do the following:
+Teleport allows multiple users to join the same SSH or `kubectl exec` session.
+Session joining can be performed via the web UI's Active Sessions page, or by
+using the `tsh join` command.
-- Observe another user's session in real time.
-- Participate interactively in another user's session.
-- Terminate another user's session at will.
+
+## Participant modes
+
+When joining a session, users can be in one of three participant modes:
+
+- **Observer**: Can only view the session.
+- **Peer**: Can view the session and interact with it as if they were the session owner.
+- **Moderator**: (Enterprise only) Can view the session and terminate it.
+
+Users can join SSH sessions from the command line or from the Teleport web UI,
+but Kubernetes sessions can only be joined from the command line.
+
+The web UI forces users to select a join mode prior to joining. If you join a
+session with `tsh join` or `tsh kube join`, you can specify a participant mode
+with the `--mode <mode>` command-line option, where `<mode>` is `peer`,
+`moderator`, or `observer`. The default participant mode is `observer`.
+
+You can leave a session with the shortcut `^c` (Control + c) while in observer or
+moderator mode. In moderator mode, you can also forcefully terminate the session
+at any point in time by pressing `t`.
+
+## Access controls
+
+You can use the `join_sessions` field of a role to specify the sessions users
+can join and under what conditions they can join a session. For example, the
+following role allows users to join both SSH and Kubernetes sessions started by
+users with the `prod-access` role and to join the session as a moderator or an
+observer:
+
+```yaml
+kind: role
+metadata:
+  name: allow-session-join
+version: v7
+spec:
+  allow:
+    join_sessions:
+      - name: Join prod sessions
+        roles : ['prod-access']
+        kinds: ['k8s', 'ssh']
+        modes: ['moderator', 'observer']
+```
+
+Users who are assigned a role with a `join_sessions` allow policy are
+implicitly allowed to list the sessions that the policy gives them permission
+to join. If there's a `deny` rule that prevents listing sessions, the
+`join_sessions` policy overrides the `deny` rule for the sessions the
+policy allows the user to join. Outside of this exception for joining
+sessions, `deny` statements take precedence.
+
+The following are required fields for `join_sessions`:
+
+|Option|Type|Description|
+|---|---|---|
+|`name`|String|The name of the allow policy.|
+|`roles`|List|A list of Teleport role names that the allow policy applies to. Active sessions created by users with these roles can be joined under this policy.|
+|`kinds`|List|The kind of sessions—SSH, Kubernetes, or both—that the allow policy applies to. The valid options are `ssh` and `k8s`.|
+|`modes`|List|The participant mode—`observer`, `moderator`, or `peer`—that the user joining the session can use to join the session. The default mode is `observer`.|
+
+If you want to allow users to list active sessions without giving them
+permission to join these sessions, you can grant them `list` permissions on the
+`session_tracker` resource.
+ +```yaml +kind: role +metadata: + name: list-active-sessions +version: v7 +spec: + allow: + rules: + - resources: [ session_tracker ] + verbs: [ list ] +``` + +Teleport also supports explicit deny rules on the `ssh_session` resource for compatibility +with legacy Teleport roles, but we do not encourage the use of the `ssh_session` resource +in new roles. + +## Moderated sessions + +In Teleport Enterprise, you can configure roles to require that sessions are joined +by 1 or more additional participants before they are allowed to start. Sessions that +require additional participants are called moderated sessions. The most common use cases for moderated sessions involve the following scenarios: - You have strict security or compliance requirements and need to have people watching over user-initiated sessions on a set of servers. -- You want to share a terminal with someone else to be able to instruct or collaborate. - You need the ability to pause or terminate active sessions. -Note that you can share terminal sessions using any Teleport edition. However, -you must have Teleport Enterprise if you want to require active sessions to be -observed or moderated. +### Example -## Require and allow policies +In the following example, Jeff's role requires additional participants before it can start. -Moderated sessions use roles to provide fine grained control over who can join a session -and who is required to be present to start one. +```code +$ tsh ssh ubuntu@prod.teleport.example.com +Teleport > Creating session with ID: 46e2af03-62d6-4e07-a886-43fe741ca044... +Teleport > Controls + - CTRL-C: Leave the session + - t: Forcefully terminate the session (moderators only) +Teleport > User jeff joined the session. +Teleport > Waiting for required participants... +``` -There are two types of policies you can use to control moderated sessions: +Jeff's session is paused, waiting for the required participants. When Alice, who is +assigned the `auditor` role, joins the waiting session as a moderator, the +session can begin. For example: -- **Require** policies define a set of conditions that must be a met for a session to - start or run. A user assigned a role with a require policy must meet the minimum - requirements of the policy to start the session that the policy applies to. -- **Allow** policies define what sessions users can join and under what conditions - they can join a session. +```code +$ tsh join --mode=moderator 46e2af03-62d6-4e07-a886-43fe741ca044 +Teleport > Creating session with ID: 46e2af03-62d6-4e07-a886-43fe741ca044... +Teleport > Controls + - CTRL-C: Leave the session + - t: Forcefully terminate the session (moderators only) +Teleport > User jeff joined the session. +Teleport > Waiting for required participants... +Teleport > User alice joined the session. +Teleport > Connecting to prod.teleport.example.com over SSH -## Configure a require policy +ubuntu@prod.teleport.example.com % +``` + +Because this session is an SSH session, Alice could also join from the +Teleport Web UI. For example: + +![Join Server Session from UI](../../../../img/webui-active-session.png) -In Teleport Enterprise editions, you can use `require_session_join` in a role to specify -the conditions that must be a met for a session to start or run. 
For example, the following -policy specifies that users assigned the `prod-access` role must have a minimum of one user -with the `auditor` role and the `moderator` mode present to start SSH or Kubernetes sessions: +### Access controls + +Moderated sessions are configured via the `require_session_join` section of a +role. This section defines the conditions that must be met for a session to +start or run. For example, the following policy specifies that users assigned +the `prod-access` role must have a minimum of one user with the `auditor` role +present in the `moderator` mode to start SSH or Kubernetes sessions: ```yaml kind: role @@ -56,22 +155,16 @@ version: v7 spec: allow: require_session_join: - - name: Auditor oversight + - name: Require one moderator filter: 'contains(user.spec.roles, "auditor")' kinds: ['k8s', 'ssh'] modes: ['moderator'] count: 1 - logins: - - ubuntu - - debian - node_labels: - env: prod - kubernetes_labels: - env: prod - kubernetes_groups: - - prod-access - kubernetes_users: - - USER + logins: [ ubuntu, debian ] + node_labels: { env: prod } + kubernetes_labels: { env: prod } + kubernetes_groups: [ prod-access ] + kubernetes_users: [ USER ] kubernetes_resources: - kind: '*' name: '*' @@ -79,17 +172,11 @@ spec: verbs: ['*'] ``` -Because this sample policy requires that at least one user with the `auditor` role to be present -as a moderator to start SSH or Kubernetes sessions, a user assigned this `prod-access` role -won't be able to start any sessions until the policy requirements are fulfilled. - The `require_session_join` rules apply to all of the user's sessions, including those that are accessible via other roles. If you do not want to require moderation for user sessions, we recommend using Access Requests to temporarily assume a role for resources that should require moderation. -### Required fields - The following are required fields for `require_session_join`: |Option|Type|Description| @@ -98,14 +185,33 @@ The following are required fields for `require_session_join`: |`filter`|Filter|An expression that, if it evaluates to true for a given user, enables the user to be present in a moderated session.| |`kinds`|List|The kind of session—SSH, Kubernetes, or both—that the policy applies to. The valid options are `ssh` and `k8s`.| |`modes`|List|The participant mode—`observer`, `moderator`, or `peer`—that the user joining the moderated session must match to satisfy the policy.| -|`count`|Integer|The minimum number of users that must match the filter expression to satisfy the policy.| +|`count`|Integer|The minimum number of users that must join the session to satisfy the policy.| + +The following fields are optional for `require_session_join`: + +|Option|Type|Description| +|---|---|---| +|`on_leave`|String|The action to take when the policy is no longer satisfied.| + +You can use the `on_leave` field in require policies to define what happens +when a participant leaves a session and causes the policy to no longer be satisfied. +There are two possible values for this field: + +- `terminate` to terminate the session immediately and disconnect all participants. +- `pause` to pause the session and stop any input/output streaming until the policy is satisfied again. + +By default, Teleport treats an empty string in this field the same as `terminate`. + +If all require policies attached to the session owner are set to `pause`, the +session discards all input from session participants and buffers the most recent +output but the session remains open so it can resume. 
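+For example, a require policy can pause, rather than terminate, a session when
+its moderator drops. The following snippet is an illustrative sketch only: it
+reuses the `prod-access` example above and simply adds `on_leave: pause` to it.
+
+```yaml
+kind: role
+metadata:
+  name: prod-access
+version: v7
+spec:
+  allow:
+    require_session_join:
+      - name: Require one moderator
+        filter: 'contains(user.spec.roles, "auditor")'
+        kinds: ['k8s', 'ssh']
+        modes: ['moderator']
+        count: 1
+        # Pause the session instead of terminating it when the last required
+        # moderator leaves; input/output streaming stops until the policy is
+        # satisfied again.
+        on_leave: pause
+```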
-#### Filter expressions +### Filter expressions Filter expressions allow for more detailed control over the scope of a policy. For example, you can use a filter expression to specify which users are required -to be present in a session. The filter has a `user` object as its context that you -can refine to match the `roles` and `name` fields you specify. +to be present in a session. The filter has a `user` object as its context that +you can refine to match the `roles` and `name` fields you specify. In the following example, the filter expression evaluates to true if the user's name is `adam` or if the user has the role `cs-observe`: @@ -124,33 +230,6 @@ Filter expressions support the following functions and operators: - `[expr] && [expr]`: Performs a logical AND on two Boolean expressions. - `[expr] || [expr]`: Performs a logical OR on two Boolean expressions. -#### Matching user count - -You can use the `count` field in a require policy to specify the minimum number -of users matching the filter expression who must be present in a session to satisfy -the policy. - -### Optional fields - -The following field is optional for `require_session_join`: - -|Option|Type|Description| -|---|---|---| -|`on_leave`|String|The action to take when the policy is no longer satisfied.| - -You can use the `on_leave` field in require policies to define what happens -when a moderator leaves a session and causes the policy to no longer be satisfied. -There are two possible values for this field: - -- `terminate` to terminate the session immediately and disconnect all participants. -- `pause` to pause the session and stop any input/output streaming until the policy is satisfied again. - -By default, Teleport treats an empty string in this field the same as `terminate`. - -If all require policies attached to the session owner are set to `pause`, the session -discards all input from session participants and buffers the most recent output but -the session remains open so it can resume. - ### Combining require policies and roles In evaluating policies and roles, all of the require policies within a role are evaluated using an @@ -169,130 +248,23 @@ you must include the `require_session_join` policy in the mapped role defined on For more information about configuring trust relationships and role mapping between root and leaf clusters, see [Configure Trusted Clusters](../../management/admin/trustedclusters.mdx). -## Configure an allow policy - -You can use `join_sessions` in a role to specify the sessions users can join and under what conditions -they can join a session. For example, the following policy is attached to the `auditor` role and allows -a user assigned to the auditor role to join SSH and Kubernetes sessions started by a user with the -role `prod-access` and to join the session as a moderator or an observer: - -```yaml -kind: role -metadata: - name: auditor -version: v7 -spec: - allow: - join_sessions: - - name: Join prod sessions - roles : ['prod-access'] - kinds: ['k8s', 'ssh'] - modes: ['moderator', 'observer'] -``` - -Users who are assigned a role with a `join_sessions` allow policy are -implicitly allowed to list the sessions that the policy gives them permission -to join. If there's a `deny` rule that prevents listing sessions, the -`join_sessions` policy overrides the `deny` rule for the sessions the -policy allows the user to join. Outside of this exception for joining -sessions, `deny` statements take precedent. 
- -### Required fields - -The following are required fields for `join_sessions`: - -|Option|Type|Description| -|---|---|---| -|`name`|String|The name of the allow policy.| -|`roles`|List|A list of Teleport role names that the allow policy applies to. Active sessions created by users with these roles can be joined under this policy.| -|`kinds`|List|The kind of sessions—SSH, Kubernetes, or both—that the allow policy applies to. The valid options are `ssh` and `k8s`.| -|`modes`|List|The participant mode—`observer`, `moderator`, or `peer`—that the user joining the session can use to join the session. The default mode is `observer`.| - -### Joining a session from the command line - -In the following example, Jeff is assigned the `prod-access` role and attempts to connect to -a server in the production environment using `tsh ssh`: - -```code -$ tsh ssh ubuntu@prod.teleport.example.com -Teleport > Creating session with ID: 46e2af03-62d6-4e07-a886-43fe741ca044... -Teleport > Controls - - CTRL-C: Leave the session - - t: Forcefully terminate the session (moderators only) -Teleport > User jeff joined the session. -Teleport > Waiting for required participants... -``` - -Jeff's session is paused, waiting for the required observers. -When Alice, who is assigned the `auditor` role, joins the waiting session -as a moderator, the session can begin. -For example: - -```code -$ tsh join --mode=moderator 46e2af03-62d6-4e07-a886-43fe741ca044 -Teleport > Creating session with ID: 46e2af03-62d6-4e07-a886-43fe741ca044... -Teleport > Controls - - CTRL-C: Leave the session - - t: Forcefully terminate the session (moderators only) -Teleport > User jeff joined the session. -Teleport > Waiting for required participants... -Teleport > User alice joined the session. -Teleport > Connecting to prod.teleport.example.com over SSH - -ubuntu@prod.teleport.example.com % -``` - -Because this session is an SSH session, Alice could also join from the -Teleport Web UI. For example: - -![Join Server Session from UI](../../../../img/webui-active-session.png) - -### Participant modes - -A participant joining a session will always have one of three modes: - -- `observer`: Allows read-only access to the session. You can view output but - cannot control the session in any way nor send any input. -- `moderator`: Allows you to watch the session. You can view output and forcefully - terminate or pause the session at any time, but can't send input. -- `peer`: Allows you to collaborate in the session. You can view output and send input. - -If you join a session with `tsh join` or `tsh kube join`, you can specify a -participant mode with the `--mode ` command-line option, where `` is `peer`, -`moderator`, or `observer`. The default participant mode is `observer`. - -You can leave a session with the shortcut `^c` (Control + c) while in observer or -moderator mode. In moderator mode, you can also forcefully terminate the session -at any point in time with the shortcut `t`. - ### Multifactor authentication -If `per_session_mfa` is set to `true` in role or cluster settings, Teleport requires -multifactor authentication checks when starting new sessions. This requirement is -also enforced for session moderators. Therefore, moderators who want to join a session -must have configured a device for multifactor authentication. - -Every 30 seconds, Teleport prompts session moderators to re-authenticate within the -next 15 seconds. This behavior continues throughout the session to ensure that -moderators are always present and watching a given session. 
-If no MFA input is received within 60 seconds, the user is disconnected from the
-session, which might cause the session to terminate or pause because a require policy
-is no longer satisfied.
-
-## Session kinds
-
-Require and allow policies have to specify which sessions they apply to. Valid
-options are `ssh` and `k8s`.
+If per-session MFA is enabled, then moderators who want to join a session will
+need to satisfy the same MFA checks as if they were starting a session of their
+own.
-- `ssh` policies apply to all SSH sessions on a node running the Teleport SSH server.
-- `k8s` policies apply to all Kubernetes sessions on clusters connected to Teleport.
+Teleport will also enforce additional presence checks for moderated sessions
+when per-session MFA is required. Every 30 seconds, Teleport prompts session
+moderators to verify their presence by re-authenticating with their MFA device
+within the next 15 seconds. This behavior continues throughout the session to
+ensure that moderators are always present and watching a given session.
-Users with the `join_sessions` permission for SSH sessions can join sessions from the
-command line or from the Teleport Web UI. Users with the `join_sessions` permission for
-Kubernetes sessions can only join session from the command line.
+If no MFA input is received within 60 seconds, the moderator is disconnected
+from the session, which might cause the session to terminate or pause because a
+require policy is no longer satisfied.
-## Session invites
+### Session invites

When starting an interactive SSH or Kubernetes session using `tsh ssh` or `tsh kube
exec` respectively, you can supply the `--reason ` or `--invited ` command-line
@@ -303,11 +275,12 @@ This information is propagated to the `session_tracker` resource, which can be
used to with a third party, for example, to enable notifications over some external
communication system.
-## File transfers
+### File transfers
-File transfers within moderated sessions are only supported when using the Teleport Web UI.
-If the current active session requires moderation, file transfer requests are automatically
-sent to all current session participants.
+SFTP file transfers within moderated sessions are only supported when using the
+Teleport Web UI. If the current active session requires moderation, file
+transfer requests are automatically sent to all current session participants and
+must be approved before the file transfer can begin.

Both the session originator and the moderator(s) must be present in the Teleport Web UI
during the file transfer initiation to receive the file transfer request notification.
@@ -322,6 +295,6 @@ all session participants are notified. After enough approvals have been given
to satisfy the policy used to start the session, the file transfer automatically
begins.
-## Related documentation
+## See also

- [Moderated Sessions](/~https://github.com/gravitational/teleport/blob/master/rfd/0043-kubeaccess-multiparty.md)
diff --git a/docs/pages/admin-guides/access-controls/guides/webauthn.mdx b/docs/pages/admin-guides/access-controls/guides/webauthn.mdx
index 425152bc0293a..8d21076257e2f 100644
--- a/docs/pages/admin-guides/access-controls/guides/webauthn.mdx
+++ b/docs/pages/admin-guides/access-controls/guides/webauthn.mdx
@@ -6,14 +6,14 @@ videoBanner: vQgKkD4ZRDU

This guide aims to help you fortify your identity infrastructure and mitigate the risks associated with IdP weaknesses.
-An IdP compromise occurs when an attacker gains unauthorized access to your identity management -system, potentially allowing them to impersonate legitimate users, escalate privileges, or access -sensitive information. This can happen through various means, such as exploiting software vulnerabilities, +An IdP compromise occurs when an attacker gains unauthorized access to your identity management +system, potentially allowing them to impersonate legitimate users, escalate privileges, or access +sensitive information. This can happen through various means, such as exploiting software vulnerabilities, stealing credentials, or social engineering attacks. -While many organizations have implemented basic security measures like single sign-on (SSO) and multi-factor authentication (MFA), -these alone may not be sufficient to protect against sophisticated attacks targeting your IdP. -Attackers are constantly evolving their techniques, and traditional security measures may have limitations +While many organizations have implemented basic security measures like single sign-on (SSO) and multi-factor authentication (MFA), +these alone may not be sufficient to protect against sophisticated attacks targeting your IdP. +Attackers are constantly evolving their techniques, and traditional security measures may have limitations or vulnerabilities that can be exploited. ![IdP threat vector tree](../../../../img/access-controls/idp-graph.png) @@ -21,15 +21,15 @@ or vulnerabilities that can be exploited. To enhance your defense against IdP compromises, we recommend implementing the following comprehensive security measures. ## Set up cluster-wide WebAuthn -Implement strong, phishing-resistant authentication across -your entire infrastructure using WebAuthn standards. WebAuthn, a W3C standard and part of FIDO2, -enables public-key cryptography for web authentication. Teleport supports WebAuthn as a multi-factor -for logging into Teleport (via tsh login or Web UI) and accessing SSH nodes or Kubernetes clusters. +Implement strong, phishing-resistant authentication across +your entire infrastructure using WebAuthn standards. WebAuthn, a W3C standard and part of FIDO2, +enables public-key cryptography for web authentication. Teleport supports WebAuthn as a multi-factor +for logging into Teleport (via tsh login or Web UI) and accessing SSH nodes or Kubernetes clusters. It's compatible with hardware keys (e.g., YubiKeys, SoloKeys) and biometric authenticators like Touch ID and Windows Hello. ### Prerequisites -- A running Teleport cluster or Teleport Cloud, version 16 or later. If you want to get started with Teleport, +- A running Teleport cluster or Teleport Cloud, version 16 or later. If you want to get started with Teleport, [sign up](https://goteleport.com/signup) for a free trial. - The `tctl` admin tool and `tsh` client tool. @@ -159,8 +159,8 @@ $ tsh login --proxy=example.teleport.sh ## Configure per-session MFA -Ensure that multi-factor authentication is required for each session, not just at initial login, -to maintain continuous security. Teleport's per-session MFA enhances security by protecting +Ensure that multi-factor authentication is required for each session, not just at initial login, +to maintain continuous security. Teleport's per-session MFA enhances security by protecting against compromised on-disk certificates. It requires additional MFA checks when initiating new SSH, Kubernetes, database, or desktop sessions. 
Teleport supports requiring additional multi-factor authentication checks @@ -222,19 +222,19 @@ spec: ``` ## Implement cluster-wide Device Trust -Develop a system to verify and manage trusted devices across your organization, reducing the risk -of unauthorized access from unknown or compromised devices. Device Trust adds an extra layer of security by requiring the use of trusted devices -for accessing protected resources, complementing user identity and role enforcement. This can be -configured cluster-wide or via RBAC. Supported resources include apps (role-based only), SSH nodes, +Develop a system to verify and manage trusted devices across your organization, reducing the risk +of unauthorized access from unknown or compromised devices. Device Trust adds an extra layer of security by requiring the use of trusted devices +for accessing protected resources, complementing user identity and role enforcement. This can be +configured cluster-wide or via RBAC. Supported resources include apps (role-based only), SSH nodes, databases, Kubernetes clusters, and first MFA device enrollment. The latter helps prevent auto-provisioning of new users through compromised IdPs. - We do not currently support Machine ID and Device Trust. Requiring Device Trust - cluster-wide or for roles impersonated by Machine ID will prevent credentials + We do not currently support Machine ID and Device Trust. Requiring Device Trust + cluster-wide or for roles impersonated by Machine ID will prevent credentials produced by Machine ID from being used to connect to resources. - As a work-around, configure Device Trust enforcement on a role-by-role basis and + As a work-around, configure Device Trust enforcement on a role-by-role basis and ensure that it is not required for roles that you will impersonate using Machine ID. @@ -247,7 +247,7 @@ responsible for managing devices, adding new devices to the inventory and removing devices that are no longer in use. - Users with the preset `editor` or `device-admin` role + Users with the preset `editor` or `device-admin` role can register and enroll their device in a single step with the following command: ```code $ tsh device enroll --current-device @@ -403,12 +403,12 @@ successfully enrolled and authenticated. ## Require MFA for administrative actions -Add an extra layer of security for sensitive administrative operations by requiring multi-factor authentication -for these high-privilege actions. Teleport enforces additional MFA verification for administrative -actions across all clients (tctl, tsh, Web UI, and Connect). This feature adds an extra security +Add an extra layer of security for sensitive administrative operations by requiring multi-factor authentication +for these high-privilege actions. Teleport enforces additional MFA verification for administrative +actions across all clients (tctl, tsh, Web UI, and Connect). This feature adds an extra security layer by re-verifying user identity immediately before any admin action, mitigating risks from compromised admin accounts. -By adopting these advanced security measures, you can create a robust defense against IdP compromises and significantly reduce your organization's attack surface. +By adopting these advanced security measures, you can create a robust defense against IdP compromises and significantly reduce your organization's attack surface. In the following sections, we'll dive deeper into each of these recommendations, providing step-by-step guidance on implementation and best practices. 
@@ -416,10 +416,10 @@ In the following sections, we'll dive deeper into each of these recommendations, with `tctl auth sign` will no longer be suitable for automation due to the additional MFA checks. - We recommend using Machine ID to issue certificates for automated workflows, + We recommend using Machine ID to issue certificates for automated workflows, which uses role impersonation that is not subject to MFA checks. - Certificates produced with `tctl auth sign` directly on an Auth Service instance using the super-admin + Certificates produced with `tctl auth sign` directly on an Auth Service instance using the super-admin role are not subject to MFA checks to support legacy self-hosted setups. @@ -475,7 +475,7 @@ Update the `cluster_auth_preference` definition to include the following content ``` ### Step 2/2. Save and exit the file - + The command `tctl` will update the remote definition: ```text @@ -487,5 +487,5 @@ For additional cluster hardening measures, see: - [Passwordless Authentication](./passwordless.mdx): Provides passwordless and usernameless authentication. - [Locking](./locking.mdx): Lock access to active user sessions or hosts. -- [Moderated Sessions](./moderated-sessions.mdx): Require session auditors and allow fine-grained live session access. +- [Moderated Sessions](./joining-sessions.mdx): Require session auditors and allow fine-grained live session access. - [Hardware Key Support](./hardware-key-support.mdx): Enforce the use of hardware-based private keys. diff --git a/docs/pages/admin-guides/access-controls/sso/sso.mdx b/docs/pages/admin-guides/access-controls/sso/sso.mdx index a6ea082f91061..c19de30c823d6 100644 --- a/docs/pages/admin-guides/access-controls/sso/sso.mdx +++ b/docs/pages/admin-guides/access-controls/sso/sso.mdx @@ -37,7 +37,7 @@ After a user completes an SSO authentication flow, Teleport creates a temporary When a user signs in to Teleport with `tsh login`, they can configure the TTL of the `user` Teleport creates. Teleport enforces a limit of 30 hours (the default is 12 hours). - + In the Teleport audit log, you will see an event of type `user.create` with information about the temporary user. @@ -145,7 +145,7 @@ $ ssh-keygen -L -f ~/.tsh/keys/${TELEPORT_CLUSTER}/${SSO_USER}-ssh/${TELEPORT_CL Since Teleport creates temporary users and issues short-lived certificates when a user authenticates via SSO, it is straightforward to integrate Teleport with multiple SSO providers. Besides the temporary `user` resource, no persistent -backend data in Teleport is tied to a user's account with the SSO provider. +backend data in Teleport is tied to a user's account with the SSO provider. This also means that if one SSO provider becomes unavailable, the end user only needs to choose another SSO provider when signing in to Teleport. While the @@ -187,7 +187,7 @@ The callback address can be changed if calling back to a remote machine instead of the local machine is required: ```code -# --bind-addr sets the host and port tsh will listen on, and --callback changes +# --bind-addr sets the host and port tsh will listen on, and --callback changes # what link is displayed to the user $ tsh login --proxy=proxy.example.com --auth=github --bind-addr=localhost:1234 --callback https://remote.machine:1234 ``` @@ -294,7 +294,7 @@ GitHub as an SSO option. (!docs/pages/includes/sso/saml-slo.mdx!) You may use `entity_descriptor_url` in lieu of `entity_descriptor` to fetch -the entity descriptor from your IDP. +the entity descriptor from your IDP. 
We recommend "pinning" the entity descriptor by including the XML rather than fetching from a URL. @@ -307,7 +307,7 @@ fetching from a URL. ``` You may use `entity_descriptor_url`, in lieu of `entity_descriptor`, to fetch -the entity descriptor from your IDP. +the entity descriptor from your IDP. We recommend "pinning" the entity descriptor by including the XML rather than fetching from a URL. @@ -334,7 +334,7 @@ fetching from a URL. ``` You may use `entity_descriptor_url`, in lieu of `entity_descriptor`, to fetch -the entity descriptor from your IDP. +the entity descriptor from your IDP. We recommend "pinning" the entity descriptor by including the XML rather than fetching from a URL. @@ -351,7 +351,7 @@ fetching from a URL. (!docs/pages/includes/sso/saml-slo.mdx!) You may use `entity_descriptor_url`, in lieu of `entity_descriptor`, to fetch -the entity descriptor from your IDP. +the entity descriptor from your IDP. We recommend "pinning" the entity descriptor by including the XML rather than fetching from a URL. @@ -366,7 +366,7 @@ fetching from a URL. -Create the connector: +Create the connector: ```code $ tctl create -f connector.yaml @@ -413,13 +413,13 @@ At this time, the `spec.provider` field should not be set for any other identity ## Configuring SSO for MFA checks -Teleport administrators can configure Teleport to delegate MFA checks to an +Teleport administrators can configure Teleport to delegate MFA checks to an SSO provider as an alternative to registering MFA devices directly with the Teleport cluster. This allows Teleport users to use MFA devices and custom flows configured in the SSO provider to carry out privileged actions in Teleport, such as: - [Per-session MFA](../guides/per-session-mfa.mdx) -- [Moderated sessions](../guides/moderated-sessions.mdx) +- [Moderated sessions](../guides/joining-sessions.mdx) - [Admin actions](../guides/mfa-for-admin-actions.mdx) Administrators may want to consider enabling this feature in order to: @@ -434,8 +434,8 @@ Administrators may want to consider enabling this feature in order to: ### Configure the IDP App / Client -There is no standardized MFA flow unlike there is with SAML/OIDC -login, so each IDP may offer zero, one, or more ways to offer MFA checks. +There is no standardized MFA flow unlike there is with SAML/OIDC +login, so each IDP may offer zero, one, or more ways to offer MFA checks. Generally, these offerings will fall under one of the following cases: @@ -448,7 +448,7 @@ which prompts for MFA for an active OIDC session. 2. Use the same IDP app for MFA: Some IDPs provide a way to fork to different flows using the same IDP app. -For example, with Okta (OIDC), you can provide `acr_values: ["phr"]` to +For example, with Okta (OIDC), you can provide `acr_values: ["phr"]` to [enforce phishing resistant authentication](https://developer.okta.com/docs/guides/step-up-authentication/main/#predefined-parameter-values). For a simpler approach, you could use the same IDP app for both login and MFA @@ -483,7 +483,7 @@ and add MFA settings. ``` You may use `entity_descriptor_url` in lieu of `entity_descriptor` to fetch -the entity descriptor from your IDP. +the entity descriptor from your IDP. We recommend "pinning" the entity descriptor by including the XML rather than fetching from a URL. @@ -491,7 +491,7 @@ fetching from a URL. -Update the connector: +Update the connector: ```code $ tctl create -f connector.yaml @@ -525,7 +525,7 @@ spec: Along with sending groups, an SSO provider will also provide a user's email address. 
In many organizations, the username that a person uses to log in to a system is the -same as the first part of their email address, the "local" part. +same as the first part of their email address, the "local" part. For example, `dave.smith@example.com` might log in with the username `dave.smith`. Teleport provides an easy way to extract the first part of an email address so @@ -673,4 +673,3 @@ which Teleport replaces with values from the single sign-on provider that the user used to authenticate with Teleport. For full details on how variable expansion works in Teleport roles, see the [Teleport Access Controls Reference](../../../reference/access-controls/roles.mdx). - diff --git a/docs/pages/connect-your-client/tsh.mdx b/docs/pages/connect-your-client/tsh.mdx index fe3eb874ceb56..5ec8ba17096fd 100644 --- a/docs/pages/connect-your-client/tsh.mdx +++ b/docs/pages/connect-your-client/tsh.mdx @@ -683,7 +683,7 @@ $ tsh join title="Lacking permission?" > Joining sessions requires special permissions that need to be set up by your cluster administrator. - Refer them to the [Moderated Sessions guide](../admin-guides/access-controls/guides/moderated-sessions.mdx) for more information on configuring join permissions. + Refer them to the [Moderated Sessions guide](../admin-guides/access-controls/guides/joining-sessions.mdx) for more information on configuring join permissions. diff --git a/docs/pages/connect-your-client/web-ui.mdx b/docs/pages/connect-your-client/web-ui.mdx index 9bce88e3eac52..f122d81205f64 100644 --- a/docs/pages/connect-your-client/web-ui.mdx +++ b/docs/pages/connect-your-client/web-ui.mdx @@ -2,8 +2,8 @@ title: Using the Web UI description: Using the Teleport Web UI --- -The Teleport Web UI is a web-based visual interface from which you can access resources, -view active sessions and recordings, create and review Access Requests, +The Teleport Web UI is a web-based visual interface from which you can access resources, +view active sessions and recordings, create and review Access Requests, manage users and roles, and more. This page serves a reference on Web UI features and their usage. @@ -12,24 +12,21 @@ This page serves a reference on Web UI features and their usage. The Teleport Web UI allows you to list and join active SSH sessions using a web-based terminal. -You can view the active SSH sessions that you are allowed to list by clicking **Active Sessions** in the navigation sidebar. -You can only see active sessions if you are assigned a role with `list` access for the `ssh_session` resource. -For more information about role permissions and access to resources, see [Teleport Access Controls -Reference](../reference/access-controls/roles.mdx). +You can view the active SSH sessions that you are allowed to list by clicking **Active Sessions** in the navigation sidebar. -From the active sessions list, click **Join** and select a participant mode to join the session: +From the active sessions list, click **Join** and select a participant mode to join the session: -- **As an Observer** with read-only access to the session. You can view output but cannot control the session in any way nor +- **As an Observer** with read-only access to the session. You can view output but cannot control the session in any way nor send any input. -- **As a Moderator** with permission to watch, pause, or terminate the session. You can view output and forcefully terminate - or pause the session at any time, but can't send input. - **As a Peer** to collaborate in the session. 
You can view output and send input. +- **As a Moderator** with permission to watch, pause, or terminate the session. You can view output and forcefully terminate + or pause the session at any time, but can't send input. Moderated sessions are an enterprise-only feature. ![joining an active session from the Web UI](../../img/webui-active-session.png) -You must have the `join_sessions` allow policy in a role you've been assigned to join sessions in any participant mode. -For information about how to configure the `join_sessions` allow policy and participant modes for a role, see -[Configure an allow policy](../admin-guides/access-controls/guides/moderated-sessions.mdx). +You must have the `join_sessions` allow policy in a role you've been assigned to join sessions in any participant mode. +For information about how to configure the `join_sessions` allow policy and participant modes for a role, see +[Configure an allow policy](../admin-guides/access-controls/guides/joining-sessions.mdx). ## Idle timeout @@ -68,7 +65,7 @@ cluster networking configuration has been updated ## Starting a database session Starting from version `17.1`, users can establish database sessions using the -Teleport Web UI. Currently, it is supported in PostgreSQL databases. +Teleport Web UI. Currently, it is supported in PostgreSQL databases. To start a new session, locate your database in the resources list and click "Connect". diff --git a/docs/pages/enroll-resources/server-access/troubleshooting-server.mdx b/docs/pages/enroll-resources/server-access/troubleshooting-server.mdx index 82197f31f5832..e1754859994bd 100644 --- a/docs/pages/enroll-resources/server-access/troubleshooting-server.mdx +++ b/docs/pages/enroll-resources/server-access/troubleshooting-server.mdx @@ -27,31 +27,31 @@ Process exited with status 255 ### Solution -You should check the permission settings for the `teleport` binary. +You should check the permission settings for the `teleport` binary. To check the file system permissions on the `teleport` binary: 1. Open a terminal shell on the computer where you have installed the `teleport` service. 1. Determine the location and file system permission of the Teleport binary by running the following command: - + ```code ls -al $(which teleport) ``` The command should return output similar to the following: - + ```text -rwxr-xr-x 1 root wheel 531849504 Aug 30 18:32 /usr/local/bin/teleport ``` - - If you don't see the permission that allows other users to read and execute (-rwxr-x**r-x**), + + If you don't see the permission that allows other users to read and execute (-rwxr-x**r-x**), you should update the permissions. For example: ```code sudo chmod go+rx $(which teleport) ``` - + 1. Restart the `teleport` service. ## Missing logins for single sign-on users @@ -66,8 +66,8 @@ provider don't see any of the logins they need to access remote resources. ### Solution -To fix this issue, you should check that the configuration of your auth connectors assigns logins to -your single sign-on users or modify the traits in the Teleport roles assigned to users through their +To fix this issue, you should check that the configuration of your auth connectors assigns logins to +your single sign-on users or modify the traits in the Teleport roles assigned to users through their group membership in the external identity provider. For more information about using traits in roles, see [Role Templates](../../admin-guides/access-controls/guides/role-templates.mdx). 
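+As an illustrative sketch (not something this guide prescribes), a role template
+can map a login trait supplied by your identity provider onto the allowed logins.
+The `{{external.logins}}` trait name and the static `ubuntu` fallback below are
+assumptions; substitute the attribute or claim your connector actually maps:
+
+```yaml
+kind: role
+version: v7
+metadata:
+  name: sso-ssh-access
+spec:
+  allow:
+    # Logins come from the "logins" trait sent by the SSO provider, plus a
+    # static fallback login.
+    logins: ['{{external.logins}}', 'ubuntu']
+    node_labels:
+      '*': '*'
+```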
@@ -86,17 +86,17 @@ servers that have previously sent a heartbeat signal to the Teleport Proxy Servi one of these servers subsequently went offline: ``` -Node Name Address Labels --------------- -------------- ----------------------- -ip-172-3-1-242 127.0.0.1:3022 hostname=ip-172-3-1-242 -ip-172-3-1-75 ⟵ Tunnel hostname=ip-172-3-1-75 +Node Name Address Labels +-------------- -------------- ----------------------- +ip-172-3-1-242 127.0.0.1:3022 hostname=ip-172-3-1-242 +ip-172-3-1-75 ⟵ Tunnel hostname=ip-172-3-1-75 ip-172-3-2-177 ⟵ Tunnel hostname=ip-172-3-2-177 ``` ### Solution -To investigate whether a server that previously sent a heartbeat has become unresponsive, you can run the -`tsh ls` or `tctl nodes ls` command with the `--format json` command-line option to see additional +To investigate whether a server that previously sent a heartbeat has become unresponsive, you can run the +`tsh ls` or `tctl nodes ls` command with the `--format json` command-line option to see additional information, including an expiration time. For example: ```json @@ -112,16 +112,16 @@ information, including an expiration time. For example: }, ``` -If the server sends a regular heartbeat signal, the `expires` value should remain relatively consistent, -for example, eight to ten minutes from the current time. If the time to expire is less than the typical -expiration time—for example, within the next two or three minutes from the current time—it's likely that +If the server sends a regular heartbeat signal, the `expires` value should remain relatively consistent, +for example, eight to ten minutes from the current time. If the time to expire is less than the typical +expiration time—for example, within the next two or three minutes from the current time—it's likely that the server has stopped sending the heartbeat. ## Unable to join a shared session -Teleport allows multiple users to observe or participate in active sessions. You can define rules and -configure role-based policies to control which users can join other users' sessions from `tsh` and the -Teleport Web UI. If you are unable to join a shared session, you should check your role assignments +Teleport allows multiple users to observe or participate in active sessions. You can define rules and +configure role-based policies to control which users can join other users' sessions from `tsh` and the +Teleport Web UI. If you are unable to join a shared session, you should check your role assignments and ensure you have a role that include the `join_session` permission. For example: @@ -139,14 +139,14 @@ spec: modes: ['moderator', 'observer'] ``` -For more information about moderated sessions and session sharing, see -[Moderated Sessions](../../admin-guides/access-controls/guides/moderated-sessions.mdx). +For more information about moderated sessions and session sharing, see +[Joining Sessions](../../admin-guides/access-controls/guides/joining-sessions.mdx). ## Unable to connect to agentless OpenSSH server as root You should check your sshd configuration in `/etc/ssh/sshd_config` for a setting like `PermitRootLogin no` or `PermitRootLogin forced-commands-only` - either of these -settings will prevent login as root. +settings will prevent login as root. If you wish to log in as root to an OpenSSH server via Teleport, we recommend changing this setting to `PermitRootLogin prohibit-password`. 
diff --git a/docs/pages/includes/access-control-guides.mdx b/docs/pages/includes/access-control-guides.mdx index 74b9a4d2801a9..81587d59ca502 100644 --- a/docs/pages/includes/access-control-guides.mdx +++ b/docs/pages/includes/access-control-guides.mdx @@ -6,7 +6,7 @@ - [Per-Session MFA](../admin-guides/access-controls/guides/per-session-mfa.mdx): Per-session multi-mactor authentication. - [MFA for Administrative Actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx): Multi-mactor authentication for admin actions. - [Locking](../admin-guides/access-controls/guides/locking.mdx): Lock access to active user sessions or hosts. -- [Moderated Sessions](../admin-guides/access-controls/guides/moderated-sessions.mdx): Require session auditors and allow fine-grained live session access. +- [Joining Sessions](../admin-guides/access-controls/guides/joining-sessions.mdx): Configure access to existing sessions. - [Hardware Key Support](../admin-guides/access-controls/guides/hardware-key-support.mdx): Enforce the use of hardware-based private keys. - [Device Trust](../admin-guides/access-controls/device-trust/guide.mdx): Register and enforce trusted devices. - [Headless WebAuthn](../admin-guides/access-controls/guides/headless.mdx): Login with Webauthn from a remote device. diff --git a/docs/pages/includes/edition-comparison.mdx b/docs/pages/includes/edition-comparison.mdx index d9ff6ec133199..4d5e3d1fd094b 100644 --- a/docs/pages/includes/edition-comparison.mdx +++ b/docs/pages/includes/edition-comparison.mdx @@ -4,7 +4,7 @@ |---|---|---|---| |[Dual Authorization](../admin-guides/access-controls/guides/dual-authz.mdx)|✖|✔|✔| |[Hardware Key Support](../admin-guides/access-controls/guides/hardware-key-support.mdx)|✖|✔|✔| -|[Moderated Sessions](../admin-guides/access-controls/guides/moderated-sessions.mdx)|✖|✔|✔| +|[Moderated Sessions](../admin-guides/access-controls/guides/joining-sessions.mdx)|✖|✔|✔| |[Role-Based Access Control](../admin-guides/access-controls/guides/role-templates.mdx)|✔|✔|✔| |[Single Sign-On](../admin-guides/access-controls/sso/sso.mdx)|GitHub|GitHub, Google Workspace, OIDC, SAML, Teleport|GitHub, Google Workspace, OIDC, SAML, Teleport| @@ -76,4 +76,3 @@ _Available as an add-on to Teleport Enterprise_ ||Community Edition|Enterprise|Cloud| |---|---|---|---| |Support|Community|24x7 support with premium SLAs and account managers|24x7 support with premium SLAs and account managers| - diff --git a/docs/pages/includes/role-spec.mdx b/docs/pages/includes/role-spec.mdx index ac8ff349abd1f..f293eb9681f63 100644 --- a/docs/pages/includes/role-spec.mdx +++ b/docs/pages/includes/role-spec.mdx @@ -501,7 +501,6 @@ spec: # # session - session playback records # session_tracker - an active session - # ssh_session - allows seeing active sessions page # instance - a Teleport instance # event - structured audit logging event # diff --git a/docs/pages/reference/access-controls/roles.mdx b/docs/pages/reference/access-controls/roles.mdx index 5db6ddd8441c8..d2c51e87b3868 100644 --- a/docs/pages/reference/access-controls/roles.mdx +++ b/docs/pages/reference/access-controls/roles.mdx @@ -87,7 +87,7 @@ There are currently five supported role versions: `v3`, `v4`, `v5`, `v6`, and `v `v4` and higher roles are completely backwards compatible with `v3`. The only difference lies in the default values which will be applied to the role if they are not explicitly set. 
-Additionally, roles with version `v5` or higher are required to use [Moderated Sessions](../../admin-guides/access-controls/guides/moderated-sessions.mdx). +Additionally, roles with version `v5` or higher are required to use [Moderated Sessions](../../admin-guides/access-controls/guides/joining-sessions.mdx). Label | `v3` Default | `v4` and higher Default ------------------ | -------------- | --------------- diff --git a/docs/pages/reference/monitoring/audit.mdx b/docs/pages/reference/monitoring/audit.mdx index c547c3c91e717..201c3495bee30 100644 --- a/docs/pages/reference/monitoring/audit.mdx +++ b/docs/pages/reference/monitoring/audit.mdx @@ -58,17 +58,19 @@ more information on how to configure the audit log, refer to the `storage` section of the example configuration file in the [Teleport Configuration Reference](../config.mdx). -Let's examine the Teleport audit log using the `dir` backend. The event log is -stored in Teleport's data dir under the `log` directory. This is usually -`/var/lib/teleport/log`. Each day is represented as a file: +Let's examine the Teleport audit log using the `dir` backend. Teleport Auth +Service instances write their logs to a subdirectory of Teleport's configured +data directory that is named based on the service's UUID. + +Each day is represented as a file: ```code -$ ls -l /var/lib/teleport/log/ +$ ls -l /var/lib/teleport/log/bbdfe5be-fb97-43af-bf3b-29ef2e302941 # total 104 -# -rw-r----- 1 root root 31638 Jan 22 20:00 2017-01-23.00:00:00.log -# -rw-r----- 1 root root 91256 Jan 31 21:00 2017-02-01.00:00:00.log -# -rw-r----- 1 root root 15815 Feb 32 22:54 2017-02-03.00:00:00.log +# -rw-r----- 1 root root 31638 Jan 22 20:00 2022-01-23.00:00:00.log +# -rw-r----- 1 root root 91256 Jan 31 21:00 2022-02-01.00:00:00.log +# -rw-r----- 1 root root 15815 Feb 32 22:54 2022-02-03.00:00:00.log ``` @@ -77,7 +79,7 @@ $ ls -l /var/lib/teleport/log/ Teleport Enterprise Cloud manages the storage of audit logs for you. You can access your audit logs via the Teleport Web UI by clicking: -**Activity** > **Audit Log** +**Audit** > **Audit Log** diff --git a/docs/pages/usage-billing.mdx b/docs/pages/usage-billing.mdx index 4e2223087d0fc..5888f2cc15b0c 100644 --- a/docs/pages/usage-billing.mdx +++ b/docs/pages/usage-billing.mdx @@ -72,6 +72,22 @@ calculate two types of billing metrics: - Monthly Active Users - Teleport Protected Resources +### Usage metrics in the Web UI + + + +This will be displayed only for those on usage-based plans. Users will need permission to read the billing resource. + + + +1. Go to https://teleport.sh/ and enter your tenant name. +1. Sign in using your administrator credentials. +1. Click on your username at the top right to expand the dropdown menu. +1. Click on "Billing Summary". +1. Usage data for the current billing cycle will be displayed. Example: + +![Billing Cycle](../img/webui_billing_cycle.png) + ### Monthly Active Users Monthly Active Users (MAU) is the aggregate number of unique active users @@ -90,7 +106,7 @@ provider, as user activity. Note: when configured to perform single-sign-on against an external identity provider, Teleport creates temporary user records that are valid for the -duration of the SSO session. As a result, the *Users* page in Teleport's web UI +duration of the SSO session. As a result, the _Users_ page in Teleport's web UI will only show users who have recently logged in and is not a true representation of all active users over the last month. 
@@ -133,7 +149,7 @@ self-hosted Teleport infrastructure or Teleport Cloud, depending on the user's plan. The submission service persists usage reports in the case of a submission failure, and deletes the reports after a successful submission. It is not possible to set up a third-party destination for usage events to independently -verify usage event data. +verify usage event data. If you are using Teleport Enterprise (Cloud), your usage data is accurate as long as Teleport-managed reporting infrastructure works as expected (check the @@ -171,4 +187,3 @@ to Teleport during a given time period. The Teleport documentation includes [how-to guides](./admin-guides/management/export-audit-events/export-audit-events.mdx) for exporting audit events to common log management solutions so you can identify users that have authenticated using an SSO provider. - diff --git a/docs/postrelease.md b/docs/postrelease.md index 8b16c04e6e8fb..7f51b2cdf6a52 100644 --- a/docs/postrelease.md +++ b/docs/postrelease.md @@ -15,7 +15,7 @@ The AWS AMI ID PR can be merged right away. ### Major releases only - [ ] Update support matrix in docs FAQ page - - Example: /~https://github.com/gravitational/teleport/pull/4602 + - Example: /~https://github.com/gravitational/teleport/pull/50345 - [ ] Update the list of OCI images to monitor and rebuild nightly in [`monitor-teleport-oci-distroless.yml` on `master`](/~https://github.com/gravitational/teleport.e/blob/master/.github/workflows/monitor-teleport-oci-distroless.yml) and [`rebuild-teleport-oci-distroless-cron.yml` on `master`](/~https://github.com/gravitational/teleport.e/blob/master/.github/workflows/rebuild-teleport-oci-distroless-cron.yml) diff --git a/e b/e index 75a0c92af6dec..f07be3414692c 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit 75a0c92af6dec9641c4cb49022c4b7f7bac8daef +Subproject commit f07be3414692c3d46aa80bd9a4641767bf281cb2 diff --git a/lib/auth/auth_with_roles_test.go b/lib/auth/auth_with_roles_test.go index 9fa89ab1b1301..506b5e5da1de1 100644 --- a/lib/auth/auth_with_roles_test.go +++ b/lib/auth/auth_with_roles_test.go @@ -6759,6 +6759,138 @@ func TestGetActiveSessionTrackers(t *testing.T) { }, checkSessionTrackers: require.Empty, }, + { + // an explicit deny on session_tracker still allows listing + // for sessions that the user can join + name: "explicit-deny-can-join", + makeRole: func() (types.Role, error) { + return types.NewRole("observe-sessions", types.RoleSpecV6{ + Allow: types.RoleConditions{ + JoinSessions: []*types.SessionJoinPolicy{ + { + Name: "observe-kube-sessions", + Kinds: []string{string(types.KubernetesSessionKind)}, + Modes: []string{string(types.SessionObserverMode)}, + Roles: []string{"access"}, + }, + }, + }, + Deny: types.RoleConditions{ + Rules: []types.Rule{{ + Resources: []string{types.KindSessionTracker}, + Verbs: []string{types.VerbList, types.VerbRead}, + }}, + }, + }) + }, + extraSetup: func(t *testing.T, srv *TestTLSServer) { + originator, err := types.NewUser("session-originator") + require.NoError(t, err) + + originator.AddRole("access") + _, err = srv.Auth().UpsertUser(context.Background(), originator) + require.NoError(t, err) + }, + makeTracker: func(testUser types.User) (types.SessionTracker, error) { + return types.NewSessionTracker(types.SessionTrackerSpecV1{ + SessionID: "1", + Kind: string(types.KubernetesSessionKind), + HostUser: "session-originator", + HostPolicies: []*types.SessionTrackerPolicySet{ + {Name: "access"}, + }, + }) + }, + checkSessionTrackers: require.NotEmpty, + }, + { + // user who can 
join SSH sessions should not be able to list + // kubernetes sessions + name: "no-access-wrong-kind", + makeRole: func() (types.Role, error) { + return types.NewRole("observe-sessions", types.RoleSpecV6{ + Allow: types.RoleConditions{ + JoinSessions: []*types.SessionJoinPolicy{ + { + Name: "observe-ssh-sessions", + Kinds: []string{string(types.SSHSessionKind)}, + Modes: []string{string(types.SessionObserverMode)}, + Roles: []string{"access"}, + }, + }, + }, + Deny: types.RoleConditions{ + Rules: []types.Rule{{ + Resources: []string{types.KindSessionTracker}, + Verbs: []string{types.VerbList, types.VerbRead}, + }}, + }, + }) + }, + extraSetup: func(t *testing.T, srv *TestTLSServer) { + originator, err := types.NewUser("session-originator") + require.NoError(t, err) + + originator.AddRole("access") + _, err = srv.Auth().UpsertUser(context.Background(), originator) + require.NoError(t, err) + }, + makeTracker: func(testUser types.User) (types.SessionTracker, error) { + return types.NewSessionTracker(types.SessionTrackerSpecV1{ + SessionID: "1", + Kind: string(types.KubernetesSessionKind), + HostUser: "session-originator", + HostPolicies: []*types.SessionTrackerPolicySet{ + {Name: "access"}, + }, + }) + }, + checkSessionTrackers: require.Empty, + }, + { + // Test RFD 45 logic: an exception for the legacy ssh_session resource. + // (Explicit deny wins, even when the user can join the session) + name: "rfd-45-legacy-rbac", + makeRole: func() (types.Role, error) { + return types.NewRole("observe-sessions", types.RoleSpecV6{ + Allow: types.RoleConditions{ + JoinSessions: []*types.SessionJoinPolicy{ + { + Name: "observe-ssh-sessions", + Kinds: []string{string(types.SSHSessionKind)}, + Modes: []string{string(types.SessionObserverMode)}, + Roles: []string{"access"}, + }, + }, + }, + Deny: types.RoleConditions{ + Rules: []types.Rule{{ + Resources: []string{types.KindSSHSession}, + Verbs: []string{types.VerbList, types.VerbRead}, + }}, + }, + }) + }, + extraSetup: func(t *testing.T, srv *TestTLSServer) { + originator, err := types.NewUser("session-originator") + require.NoError(t, err) + + originator.AddRole("access") + _, err = srv.Auth().UpsertUser(context.Background(), originator) + require.NoError(t, err) + }, + makeTracker: func(testUser types.User) (types.SessionTracker, error) { + return types.NewSessionTracker(types.SessionTrackerSpecV1{ + SessionID: "1", + Kind: string(types.SSHSessionKind), + HostUser: "session-originator", + HostPolicies: []*types.SessionTrackerPolicySet{ + {Name: "access"}, + }, + }) + }, + checkSessionTrackers: require.Empty, + }, } { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() diff --git a/lib/client/ca_export.go b/lib/client/ca_export.go index 6a1aefddb058d..4fbd59fc4df98 100644 --- a/lib/client/ca_export.go +++ b/lib/client/ca_export.go @@ -71,9 +71,21 @@ func (r *ExportAuthoritiesRequest) shouldExportIntegration(ctx context.Context) } } -// ExportAuthorities returns the list of authorities in OpenSSH compatible formats as a string. -// If the ExportAuthoritiesRequest.AuthType is present only prints keys for CAs of this type, -// otherwise returns host and user SSH keys. +// ExportedAuthority represents an exported authority certificate, as returned +// by [ExportAllAuthorities] or [ExportAllAuthoritiesSecrets]. +type ExportedAuthority struct { + // Data is the output of the exported authority. + // May be an SSH authorized key, an SSH known hosts entry, a DER or a PEM, + // depending on the type of the exported authority. 
+ Data []byte +} + +// ExportAllAuthorities exports public keys of all authorities of a particular +// type. The export format depends on the authority type, see below for +// details. +// +// An empty ExportAuthoritiesRequest.AuthType is interpreted as an export for +// host and user SSH keys. // // Exporting using "tls*", "database", "windows" AuthType: // Returns the certificate authority public key to be used by systems that rely on TLS. @@ -95,27 +107,92 @@ func (r *ExportAuthoritiesRequest) shouldExportIntegration(ctx context.Context) // For example: // > @cert-authority *.cluster-a ssh-rsa AAA... type=host // URL encoding is used to pass the CA type and allowed logins into the comment field. -func ExportAuthorities(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) (string, error) { - if isIntegration, err := req.shouldExportIntegration(ctx); err != nil { - return "", trace.Wrap(err) - } else if isIntegration { - return exportAuthForIntegration(ctx, client, req) +// +// At least one authority is guaranteed on success. +func ExportAllAuthorities(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) ([]*ExportedAuthority, error) { + const exportSecrets = false + return exportAllAuthorities(ctx, client, req, exportSecrets) +} + +// ExportAllAuthoritiesSecrets exports private keys of all authorities of a +// particular type. +// See [ExportAllAuthorities] for more information. +// +// At least one authority is guaranteed on success. +func ExportAllAuthoritiesSecrets(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) ([]*ExportedAuthority, error) { + const exportSecrets = true + return exportAllAuthorities(ctx, client, req, exportSecrets) +} + +func exportAllAuthorities( + ctx context.Context, + client authclient.ClientI, + req ExportAuthoritiesRequest, + exportSecrets bool, +) ([]*ExportedAuthority, error) { + var authorities []*ExportedAuthority + switch isIntegration, err := req.shouldExportIntegration(ctx); { + case err != nil: + return nil, trace.Wrap(err) + case isIntegration && exportSecrets: + return nil, trace.NotImplemented("export with secrets is not supported for %q CAs", req.AuthType) + case isIntegration: + authorities, err = exportAuthForIntegration(ctx, client, req) + if err != nil { + return nil, trace.Wrap(err) + } + default: + authorities, err = exportAuth(ctx, client, req, exportSecrets) + if err != nil { + return nil, trace.Wrap(err) + } } - return exportAuth(ctx, client, req, false /* exportSecrets */) + + // Sanity check that we have at least one authority. + // Not expected to happen in practice. + if len(authorities) == 0 { + return nil, trace.BadParameter("export returned zero authorities") + } + + return authorities, nil } -// ExportAuthoritiesSecrets exports the Authority Certificate secrets (private keys). -// See ExportAuthorities for more information. +// ExportAuthorities is the single-authority version of [ExportAllAuthorities]. +// Soft-deprecated, prefer using [ExportAllAuthorities] and handling exports +// with more than one authority gracefully. +func ExportAuthorities(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) (string, error) { + // TODO(codingllama): Remove ExportAuthorities. + return exportAuthorities(ctx, client, req, ExportAllAuthorities) +} + +// ExportAuthoritiesSecrets is the single-authority variant of +// [ExportAllAuthoritiesSecrets]. 
+// Soft-deprecated, prefer using [ExportAllAuthoritiesSecrets] and handling +// exports with more than one authority gracefully. func ExportAuthoritiesSecrets(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) (string, error) { - if isIntegration, err := req.shouldExportIntegration(ctx); err != nil { + // TODO(codingllama): Remove ExportAuthoritiesSecrets. + return exportAuthorities(ctx, client, req, ExportAllAuthoritiesSecrets) +} + +func exportAuthorities( + ctx context.Context, + client authclient.ClientI, + req ExportAuthoritiesRequest, + exportAllFunc func(context.Context, authclient.ClientI, ExportAuthoritiesRequest) ([]*ExportedAuthority, error), +) (string, error) { + authorities, err := exportAllFunc(ctx, client, req) + if err != nil { return "", trace.Wrap(err) - } else if isIntegration { - return "", trace.NotImplemented("export with secrets is not supported for %q CAs", req.AuthType) } - return exportAuth(ctx, client, req, true /* exportSecrets */) + // At least one authority is guaranteed on success by both ExportAll methods. + if l := len(authorities); l > 1 { + return "", trace.BadParameter("export returned %d authorities, expected exactly one", l) + } + + return string(authorities[0].Data), nil } -func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest, exportSecrets bool) (string, error) { +func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest, exportSecrets bool) ([]*ExportedAuthority, error) { var typesToExport []types.CertAuthType if exportSecrets { @@ -123,7 +200,7 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor if err == nil { ctx = mfa.ContextWithMFAResponse(ctx, mfaResponse) } else if !errors.Is(err, &mfa.ErrMFANotRequired) && !errors.Is(err, &mfa.ErrMFANotSupported) { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } } @@ -205,13 +282,13 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor } else { authType := types.CertAuthType(req.AuthType) if err := authType.Check(); err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } typesToExport = []types.CertAuthType{authType} } localAuthName, err := client.GetDomainName(ctx) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } // fetch authorities via auth API (and only take local CAs, ignoring @@ -220,7 +297,7 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor for _, at := range typesToExport { cas, err := client.GetCertAuthorities(ctx, at, exportSecrets) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } for _, ca := range cas { if ca.GetClusterName() == localAuthName { @@ -236,7 +313,7 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor if req.ExportAuthorityFingerprint != "" { fingerprint, err := sshutils.PrivateKeyFingerprint(key.PrivateKey) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } if fingerprint != req.ExportAuthorityFingerprint { @@ -254,7 +331,7 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor if req.ExportAuthorityFingerprint != "" { fingerprint, err := sshutils.AuthorizedKeyFingerprint(key.PublicKey) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } if fingerprint != req.ExportAuthorityFingerprint { @@ -267,7 +344,7 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor if 
req.UseCompatVersion { castr, err := hostCAFormat(ca, key.PublicKey, client) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } ret.WriteString(castr) @@ -282,10 +359,10 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor case types.HostCA: castr, err = hostCAFormat(ca, key.PublicKey, client) default: - return "", trace.BadParameter("unknown user type: %q", ca.GetType()) + return nil, trace.BadParameter("unknown user type: %q", ca.GetType()) } if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } // write the export friendly string @@ -293,7 +370,9 @@ func exportAuth(ctx context.Context, client authclient.ClientI, req ExportAuthor } } - return ret.String(), nil + return []*ExportedAuthority{ + {Data: []byte(ret.String())}, + }, nil } type exportTLSAuthorityRequest struct { @@ -302,10 +381,10 @@ type exportTLSAuthorityRequest struct { ExportPrivateKeys bool } -func exportTLSAuthority(ctx context.Context, client authclient.ClientI, req exportTLSAuthorityRequest) (string, error) { +func exportTLSAuthority(ctx context.Context, client authclient.ClientI, req exportTLSAuthorityRequest) ([]*ExportedAuthority, error) { clusterName, err := client.GetDomainName(ctx) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } certAuthority, err := client.GetCertAuthority( @@ -314,29 +393,33 @@ func exportTLSAuthority(ctx context.Context, client authclient.ClientI, req expo req.ExportPrivateKeys, ) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } - if l := len(certAuthority.GetActiveKeys().TLS); l != 1 { - return "", trace.BadParameter("expected one TLS key pair, got %v", l) - } - keyPair := certAuthority.GetActiveKeys().TLS[0] + activeKeys := certAuthority.GetActiveKeys().TLS + // TODO(codingllama): Export AdditionalTrustedKeys as well? 
- bytesToExport := keyPair.Cert - if req.ExportPrivateKeys { - bytesToExport = keyPair.Key - } + authorities := make([]*ExportedAuthority, len(activeKeys)) + for i, activeKey := range activeKeys { + bytesToExport := activeKey.Cert + if req.ExportPrivateKeys { + bytesToExport = activeKey.Key + } - if !req.UnpackPEM { - return string(bytesToExport), nil - } + if req.UnpackPEM { + block, _ := pem.Decode(bytesToExport) + if block == nil { + return nil, trace.BadParameter("invalid PEM data") + } + bytesToExport = block.Bytes + } - b, _ := pem.Decode(bytesToExport) - if b == nil { - return "", trace.BadParameter("invalid PEM data") + authorities[i] = &ExportedAuthority{ + Data: bytesToExport, + } } - return string(b.Bytes), nil + return authorities, nil } // userCAFormat returns the certificate authority public key exported as a single @@ -375,21 +458,23 @@ func hostCAFormat(ca types.CertAuthority, keyBytes []byte, client authclient.Cli }) } -func exportAuthForIntegration(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) (string, error) { +func exportAuthForIntegration(ctx context.Context, client authclient.ClientI, req ExportAuthoritiesRequest) ([]*ExportedAuthority, error) { switch req.AuthType { case "github": keySet, err := fetchIntegrationCAKeySet(ctx, client, req.Integration) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } ret, err := exportGitHubCAs(keySet, req) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } - return ret, nil + return []*ExportedAuthority{ + {Data: []byte(ret)}, + }, nil default: - return "", trace.BadParameter("unknown integration CA type %q", req.AuthType) + return nil, trace.BadParameter("unknown integration CA type %q", req.AuthType) } } diff --git a/lib/client/ca_export_test.go b/lib/client/ca_export_test.go index cf7ff693716fe..5e7004eb88543 100644 --- a/lib/client/ca_export_test.go +++ b/lib/client/ca_export_test.go @@ -20,21 +20,27 @@ package client import ( "context" + "crypto/rand" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "fmt" + "math/big" "testing" + "time" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/gravitational/teleport/api/client/proto" + clientpb "github.com/gravitational/teleport/api/client/proto" integrationpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/integration/v1" "github.com/gravitational/teleport/api/mfa" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/keys" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/fixtures" ) @@ -56,7 +62,7 @@ func (m *mockAuthClient) GetCertAuthority(ctx context.Context, id types.CertAuth return m.server.GetCertAuthority(ctx, id, loadKeys) } -func (m *mockAuthClient) PerformMFACeremony(ctx context.Context, challengeRequest *proto.CreateAuthenticateChallengeRequest, promptOpts ...mfa.PromptOpt) (*proto.MFAAuthenticateResponse, error) { +func (m *mockAuthClient) PerformMFACeremony(ctx context.Context, challengeRequest *clientpb.CreateAuthenticateChallengeRequest, promptOpts ...mfa.PromptOpt) (*clientpb.MFAAuthenticateResponse, error) { // return MFA not required to gracefully skip the MFA prompt. 
return nil, &mfa.ErrMFANotRequired } @@ -77,6 +83,8 @@ func (m *mockIntegrationsClient) ExportIntegrationCertAuthorities(ctx context.Co } func TestExportAuthorities(t *testing.T) { + t.Parallel() + ctx := context.Background() const localClusterName = "localcluster" @@ -123,206 +131,421 @@ func TestExportAuthorities(t *testing.T) { require.Contains(t, s, fixtures.SSHCAPublicKey) } - for _, exportSecrets := range []bool{false, true} { - for _, tt := range []struct { - name string - req ExportAuthoritiesRequest - errorCheck require.ErrorAssertionFunc - assertNoSecrets func(t *testing.T, output string) - assertSecrets func(t *testing.T, output string) - }{ - { - name: "ssh host and user ca", - req: ExportAuthoritiesRequest{ - AuthType: "", - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") - require.Contains(t, output, "cert-authority ecdsa-sha2-nistp256") - }, - assertSecrets: func(t *testing.T, output string) {}, - }, - { - name: "user", - req: ExportAuthoritiesRequest{ - AuthType: "user", - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - require.Contains(t, output, "cert-authority ecdsa-sha2-nistp256") - }, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - name: "host", - req: ExportAuthoritiesRequest{ - AuthType: "host", - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") - }, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - name: "tls", - req: ExportAuthoritiesRequest{ - AuthType: "tls", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificatePEMFunc, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - name: "windows", - req: ExportAuthoritiesRequest{ - AuthType: "windows", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificateDERFunc, - assertSecrets: validateECDSAPrivateKeyDERFunc, - }, - { - name: "invalid", - req: ExportAuthoritiesRequest{ - AuthType: "invalid", - }, - errorCheck: func(tt require.TestingT, err error, i ...interface{}) { - require.ErrorContains(tt, err, `"invalid" authority type is not supported`) - }, - }, - { - name: "fingerprint not found", - req: ExportAuthoritiesRequest{ - AuthType: "user", - ExportAuthorityFingerprint: "not found fingerprint", - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - require.Empty(t, output) - }, - assertSecrets: func(t *testing.T, output string) { - require.Empty(t, output) - }, - }, - { - name: "fingerprint not found", - req: ExportAuthoritiesRequest{ - AuthType: "user", - ExportAuthorityFingerprint: "fake fingerprint", - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - require.Empty(t, output) - }, - assertSecrets: func(t *testing.T, output string) { - require.Empty(t, output) - }, - }, - { - name: "using compat version", - req: ExportAuthoritiesRequest{ - AuthType: "user", - UseCompatVersion: true, - }, - errorCheck: require.NoError, - assertNoSecrets: func(t *testing.T, output string) { - // compat version (using 1.0) returns cert-authority to be used in the server - // even when asking for ssh authorized hosts / known hosts - require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") - }, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - 
name: "db", - req: ExportAuthoritiesRequest{ - AuthType: "db", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificatePEMFunc, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - name: "db-der", - req: ExportAuthoritiesRequest{ - AuthType: "db-der", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificateDERFunc, - assertSecrets: validateRSAPrivateKeyDERFunc, - }, - { - name: "db-client", - req: ExportAuthoritiesRequest{ - AuthType: "db-client", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificatePEMFunc, - assertSecrets: validatePrivateKeyPEMFunc, - }, - { - name: "db-client-der", - req: ExportAuthoritiesRequest{ - AuthType: "db-client-der", - }, - errorCheck: require.NoError, - assertNoSecrets: validateTLSCertificateDERFunc, - assertSecrets: validateRSAPrivateKeyDERFunc, - }, - { - name: "github missing integration", - req: ExportAuthoritiesRequest{ - AuthType: "github", - }, - errorCheck: require.Error, - }, - { - name: "github", - req: ExportAuthoritiesRequest{ - AuthType: "github", - Integration: "my-github", - }, - errorCheck: require.NoError, - assertNoSecrets: validateGitHubCAFunc, - }, - } { - t.Run(fmt.Sprintf("%s_exportSecrets_%v", tt.name, exportSecrets), func(t *testing.T) { - mockedClient := &mockAuthClient{ - server: testAuth.AuthServer, - integrationsClient: mockIntegrationsClient{ - caKeySet: &types.CAKeySet{ - SSH: []*types.SSHKeyPair{{ - PublicKey: []byte(fixtures.SSHCAPublicKey), - }}, - }, - }, - } - var ( - err error - exported string - ) - exportFunc := ExportAuthorities - checkFunc := tt.assertNoSecrets - - if exportSecrets { - exportFunc = ExportAuthoritiesSecrets - checkFunc = tt.assertSecrets - } + mockedAuthClient := &mockAuthClient{ + server: testAuth.AuthServer, + integrationsClient: mockIntegrationsClient{ + caKeySet: &types.CAKeySet{ + SSH: []*types.SSHKeyPair{{ + PublicKey: []byte(fixtures.SSHCAPublicKey), + }}, + }, + }, + } - if checkFunc == nil { - t.Skip("assert func not provided") - } + for _, tt := range []struct { + name string + req ExportAuthoritiesRequest + errorCheck require.ErrorAssertionFunc + assertNoSecrets func(t *testing.T, output string) + assertSecrets func(t *testing.T, output string) + skipSecrets bool + }{ + { + name: "ssh host and user ca", + req: ExportAuthoritiesRequest{ + AuthType: "", + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") + require.Contains(t, output, "cert-authority ecdsa-sha2-nistp256") + }, + assertSecrets: func(t *testing.T, output string) {}, + }, + { + name: "user", + req: ExportAuthoritiesRequest{ + AuthType: "user", + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + require.Contains(t, output, "cert-authority ecdsa-sha2-nistp256") + }, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "host", + req: ExportAuthoritiesRequest{ + AuthType: "host", + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") + }, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "tls", + req: ExportAuthoritiesRequest{ + AuthType: "tls", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificatePEMFunc, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "windows", + req: ExportAuthoritiesRequest{ + AuthType: 
"windows", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificateDERFunc, + assertSecrets: validateECDSAPrivateKeyDERFunc, + }, + { + name: "invalid", + req: ExportAuthoritiesRequest{ + AuthType: "invalid", + }, + errorCheck: func(tt require.TestingT, err error, i ...interface{}) { + require.ErrorContains(tt, err, `"invalid" authority type is not supported`) + }, + }, + { + name: "fingerprint not found", + req: ExportAuthoritiesRequest{ + AuthType: "user", + ExportAuthorityFingerprint: "not found fingerprint", + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + require.Empty(t, output) + }, + assertSecrets: func(t *testing.T, output string) { + require.Empty(t, output) + }, + }, + { + name: "fingerprint not found", + req: ExportAuthoritiesRequest{ + AuthType: "user", + ExportAuthorityFingerprint: "fake fingerprint", + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + require.Empty(t, output) + }, + assertSecrets: func(t *testing.T, output string) { + require.Empty(t, output) + }, + }, + { + name: "using compat version", + req: ExportAuthoritiesRequest{ + AuthType: "user", + UseCompatVersion: true, + }, + errorCheck: require.NoError, + assertNoSecrets: func(t *testing.T, output string) { + // compat version (using 1.0) returns cert-authority to be used in the server + // even when asking for ssh authorized hosts / known hosts + require.Contains(t, output, "@cert-authority localcluster,*.localcluster ecdsa-sha2-nistp256") + }, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "db", + req: ExportAuthoritiesRequest{ + AuthType: "db", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificatePEMFunc, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "db-der", + req: ExportAuthoritiesRequest{ + AuthType: "db-der", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificateDERFunc, + assertSecrets: validateRSAPrivateKeyDERFunc, + }, + { + name: "db-client", + req: ExportAuthoritiesRequest{ + AuthType: "db-client", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificatePEMFunc, + assertSecrets: validatePrivateKeyPEMFunc, + }, + { + name: "db-client-der", + req: ExportAuthoritiesRequest{ + AuthType: "db-client-der", + }, + errorCheck: require.NoError, + assertNoSecrets: validateTLSCertificateDERFunc, + assertSecrets: validateRSAPrivateKeyDERFunc, + }, + { + name: "github missing integration", + req: ExportAuthoritiesRequest{ + AuthType: "github", + }, + errorCheck: require.Error, + }, + { + name: "github", + req: ExportAuthoritiesRequest{ + AuthType: "github", + Integration: "my-github", + }, + errorCheck: require.NoError, + assertNoSecrets: validateGitHubCAFunc, + skipSecrets: true, // not supported for GitHub + }, + } { + runTest := func( + t *testing.T, + exportFunc func(context.Context, authclient.ClientI, ExportAuthoritiesRequest) ([]*ExportedAuthority, error), + assertFunc func(t *testing.T, output string), + ) { + authorities, err := exportFunc(ctx, mockedAuthClient, tt.req) + tt.errorCheck(t, err) + if err != nil { + return + } + + require.Len(t, authorities, 1, "exported authorities mismatch") + exported := string(authorities[0].Data) + assertFunc(t, exported) + } + + runUnaryTest := func( + t *testing.T, + exportFunc func(context.Context, authclient.ClientI, ExportAuthoritiesRequest) (string, error), + assertFunc func(t *testing.T, output string), + ) { + exported, err := exportFunc(ctx, mockedAuthClient, 
tt.req) + tt.errorCheck(t, err) + if err != nil { + return + } - exported, err = exportFunc(ctx, mockedClient, tt.req) - tt.errorCheck(t, err) + assertFunc(t, exported) + } + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + t.Run(fmt.Sprintf("%s/ExportAllAuthorities", tt.name), func(t *testing.T) { + runTest(t, ExportAllAuthorities, tt.assertNoSecrets) + }) + t.Run(fmt.Sprintf("%s/ExportAuthorities", tt.name), func(t *testing.T) { + runUnaryTest(t, ExportAuthorities, tt.assertNoSecrets) + }) + if tt.skipSecrets { + return + } + + t.Run(fmt.Sprintf("%s/ExportAllAuthoritiesSecrets", tt.name), func(t *testing.T) { + runTest(t, ExportAllAuthoritiesSecrets, tt.assertSecrets) + }) + t.Run(fmt.Sprintf("%s/ExportAuthoritiesSecrets", tt.name), func(t *testing.T) { + runUnaryTest(t, ExportAuthoritiesSecrets, tt.assertSecrets) + }) + }) + } +} - if err != nil { - return +// Tests a scenario similar to +// /~https://github.com/gravitational/teleport/issues/35444. +func TestExportAllAuthorities_mutipleActiveKeys(t *testing.T) { + t.Parallel() + + softwareKey, err := cryptosuites.GeneratePrivateKeyWithAlgorithm(cryptosuites.ECDSAP256) + require.NoError(t, err, "GeneratePrivateKeyWithAlgorithm errored") + // Typically the HSM key would be RSA2048, but this is fine for testing + // purposes. + hsmKey, err := cryptosuites.GeneratePrivateKeyWithAlgorithm(cryptosuites.ECDSAP256) + require.NoError(t, err, "GeneratePrivateKeyWithAlgorithm errored") + + makeSerialNumber := func() func() *big.Int { + lastSerialNumber := int64(0) + return func() *big.Int { + lastSerialNumber++ + return big.NewInt(lastSerialNumber) + } + }() + + const clusterName = "zarq" // fake, doesn't matter for this test. + makeKeyPairs := func(t *testing.T, key *keys.PrivateKey, keyType types.PrivateKeyType) (sshKP *types.SSHKeyPair, tlsPEM, tlsDER *types.TLSKeyPair) { + sshPriv, err := key.MarshalSSHPrivateKey() + require.NoError(t, err, "MarshalSSHPrivateKey errored") + sshKP = &types.SSHKeyPair{ + PublicKey: key.MarshalSSHPublicKey(), + PrivateKey: sshPriv, + PrivateKeyType: keyType, + } + + serialNumber := makeSerialNumber() + subject := pkix.Name{ + Organization: []string{clusterName}, + SerialNumber: serialNumber.String(), + CommonName: clusterName, + } + now := time.Now() + // template mimics an actual user CA certificate. + template := &x509.Certificate{ + SerialNumber: serialNumber, + Issuer: subject, + Subject: subject, + NotBefore: now.Add(-1 * time.Second), + NotAfter: now.Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + IsCA: true, + } + x509CertDER, err := x509.CreateCertificate(rand.Reader, template, template /* parent */, key.Public(), key.Signer) + require.NoError(t, err, "CreateCertificate errored") + x509CertPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: x509CertDER, + }) + tlsPEM = &types.TLSKeyPair{ + Cert: x509CertPEM, + Key: key.PrivateKeyPEM(), + KeyType: keyType, + } + + block, _ := pem.Decode(tlsPEM.Key) + require.NotNil(t, block, "pem.Decode returned nil block") + // Note that typically types.TLSKeyPair doesn't hold raw/DER data, this is + // only used for test convenience. 
+ tlsDER = &types.TLSKeyPair{ + Cert: x509CertDER, + Key: block.Bytes, + KeyType: keyType, + } + + return sshKP, tlsPEM, tlsDER + } + + softKeySSH, softKeyPEM, softKeyDER := makeKeyPairs(t, softwareKey, types.PrivateKeyType_RAW) + hsmKeySSH, hsmKeyPEM, hsmKeyDER := makeKeyPairs(t, hsmKey, types.PrivateKeyType_PKCS11) + userCA, err := types.NewCertAuthority(types.CertAuthoritySpecV2{ + Type: "user", + ClusterName: clusterName, + ActiveKeys: types.CAKeySet{ + SSH: []*types.SSHKeyPair{ + softKeySSH, + hsmKeySSH, + }, + TLS: []*types.TLSKeyPair{ + softKeyPEM, + hsmKeyPEM, + }, + }, + }) + require.NoError(t, err, "NewCertAuthority(user) errored") + + authClient := &multiCAAuthClient{ + ClientI: nil, + clusterName: clusterName, + certAuthorities: []types.CertAuthority{userCA}, + } + ctx := context.Background() + + tests := []struct { + name string + req *ExportAuthoritiesRequest + wantPublic, wantPrivate []*ExportedAuthority + }{ + { + name: "tls-user", + req: &ExportAuthoritiesRequest{ + AuthType: "tls-user", + }, + wantPublic: []*ExportedAuthority{ + {Data: softKeyPEM.Cert}, + {Data: hsmKeyPEM.Cert}, + }, + wantPrivate: []*ExportedAuthority{ + {Data: softKeyPEM.Key}, + {Data: hsmKeyPEM.Key}, + }, + }, + { + name: "windows", + req: &ExportAuthoritiesRequest{ + AuthType: "windows", + }, + wantPublic: []*ExportedAuthority{ + {Data: softKeyDER.Cert}, + {Data: hsmKeyDER.Cert}, + }, + wantPrivate: []*ExportedAuthority{ + {Data: softKeyDER.Key}, + {Data: hsmKeyDER.Key}, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + runTest := func( + t *testing.T, + exportAllFunc func(context.Context, authclient.ClientI, ExportAuthoritiesRequest) ([]*ExportedAuthority, error), + want []*ExportedAuthority, + ) { + got, err := exportAllFunc(ctx, authClient, *test.req) + require.NoError(t, err, "exportAllFunc errored") + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Authorities mismatch (-want +got)\n%s", diff) } + } - checkFunc(t, exported) + t.Run("ExportAllAuthorities", func(t *testing.T) { + runTest(t, ExportAllAuthorities, test.wantPublic) + }) + t.Run("ExportAllAuthoritiesSecrets", func(t *testing.T) { + runTest(t, ExportAllAuthoritiesSecrets, test.wantPrivate) }) + }) + } +} + +type multiCAAuthClient struct { + authclient.ClientI + + clusterName string + certAuthorities []types.CertAuthority +} + +func (m *multiCAAuthClient) GetDomainName(context.Context) (string, error) { + return m.clusterName, nil +} + +func (m *multiCAAuthClient) GetCertAuthority(_ context.Context, id types.CertAuthID, loadKeys bool) (types.CertAuthority, error) { + for _, ca := range m.certAuthorities { + if ca.GetType() == id.Type && ca.GetClusterName() == id.DomainName { + if !loadKeys { + ca = ca.WithoutSecrets().(types.CertAuthority) + } + return ca, nil } } + return nil, nil +} + +func (m *multiCAAuthClient) PerformMFACeremony( + context.Context, + *clientpb.CreateAuthenticateChallengeRequest, + ...mfa.PromptOpt, +) (*clientpb.MFAAuthenticateResponse, error) { + // Skip MFA ceremonies. 
+ return nil, &mfa.ErrMFANotRequired } diff --git a/lib/services/presets.go b/lib/services/presets.go index e4ab09c146928..1cf2a918a388d 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -197,6 +197,7 @@ func NewPresetEditorRole() types.Role { types.NewRule(types.KindAutoUpdateVersion, RW()), types.NewRule(types.KindAutoUpdateConfig, RW()), types.NewRule(types.KindAutoUpdateAgentRollout, RO()), + types.NewRule(types.KindGitServer, RW()), }, }, }, @@ -253,6 +254,9 @@ func NewPresetAccessRole() types.Role { Verbs: []string{types.Wildcard}, }, }, + GitHubPermissions: []types.GitHubPermission{{ + Organizations: []string{teleport.TraitInternalGitHubOrgs}, + }}, Rules: []types.Rule{ types.NewRule(types.KindEvent, RO()), { @@ -693,6 +697,7 @@ func NewPresetTerraformProviderRole() types.Role { types.NewRule(types.KindDynamicWindowsDesktop, RW()), types.NewRule(types.KindStaticHostUser, RW()), types.NewRule(types.KindWorkloadIdentity, RW()), + types.NewRule(types.KindGitServer, RW()), }, }, }, @@ -955,6 +960,16 @@ func AddRoleDefaults(ctx context.Context, role types.Role) (types.Role, error) { } } + // GitHub permissions. + if len(role.GetGitHubPermissions(types.Allow)) == 0 { + if githubOrgs := defaultGitHubOrgs()[role.GetName()]; len(githubOrgs) > 0 { + role.SetGitHubPermissions(types.Allow, []types.GitHubPermission{{ + Organizations: githubOrgs, + }}) + changed = true + } + } + if !changed { return nil, trace.AlreadyExists("no change") } @@ -1066,3 +1081,9 @@ func updateAllowLabels(role types.Role, kind string, defaultLabels types.Labels) return changed, nil } + +func defaultGitHubOrgs() map[string][]string { + return map[string][]string{ + teleport.PresetAccessRoleName: []string{teleport.TraitInternalGitHubOrgs}, + } +} diff --git a/lib/services/presets_test.go b/lib/services/presets_test.go index 4252208c37bb4..0c33b5866db11 100644 --- a/lib/services/presets_test.go +++ b/lib/services/presets_test.go @@ -139,6 +139,9 @@ func TestAddRoleDefaults(t *testing.T) { DatabaseServiceLabels: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseServiceLabels, DatabaseRoles: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseRoles, Rules: NewPresetAccessRole().GetRules(types.Allow), + GitHubPermissions: []types.GitHubPermission{{ + Organizations: defaultGitHubOrgs()[teleport.PresetAccessRoleName], + }}, }, }, }, @@ -171,6 +174,9 @@ func TestAddRoleDefaults(t *testing.T) { DatabaseServiceLabels: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseServiceLabels, DatabaseRoles: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseRoles, Rules: defaultAllowRules()[teleport.PresetAccessRoleName], + GitHubPermissions: []types.GitHubPermission{{ + Organizations: defaultGitHubOrgs()[teleport.PresetAccessRoleName], + }}, }, }, }, @@ -186,6 +192,9 @@ func TestAddRoleDefaults(t *testing.T) { DatabaseServiceLabels: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseServiceLabels, DatabaseRoles: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseRoles, Rules: defaultAllowRules()[teleport.PresetAccessRoleName], + GitHubPermissions: []types.GitHubPermission{{ + Organizations: defaultGitHubOrgs()[teleport.PresetAccessRoleName], + }}, }, }, }, @@ -202,6 +211,9 @@ func TestAddRoleDefaults(t *testing.T) { DatabaseServiceLabels: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseServiceLabels, DatabaseRoles: defaultAllowLabels(false)[teleport.PresetAccessRoleName].DatabaseRoles, Rules: 
defaultAllowRules()[teleport.PresetAccessRoleName], + GitHubPermissions: []types.GitHubPermission{{ + Organizations: defaultGitHubOrgs()[teleport.PresetAccessRoleName], + }}, }, }, }, @@ -735,6 +747,7 @@ func TestAddRoleDefaults(t *testing.T) { types.NewRule(types.KindDynamicWindowsDesktop, RW()), types.NewRule(types.KindStaticHostUser, RW()), types.NewRule(types.KindWorkloadIdentity, RW()), + types.NewRule(types.KindGitServer, RW()), }, }, }, diff --git a/lib/services/role.go b/lib/services/role.go index d9691dc29ec27..458662acf5305 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -511,6 +511,12 @@ func ApplyTraits(r types.Role, traits map[string][]string) (types.Role, error) { outDbRoles := applyValueTraitsSlice(inDbRoles, traits, "database role") r.SetDatabaseRoles(condition, apiutils.Deduplicate(outDbRoles)) + githubPermissions := r.GetGitHubPermissions(condition) + for i, perm := range githubPermissions { + githubPermissions[i].Organizations = applyValueTraitsSlice(perm.Organizations, traits, "github organizations") + } + r.SetGitHubPermissions(condition, githubPermissions) + var out []types.KubernetesResource // we access the resources in the role using the role conditions // to avoid receiving the compatibility resources added in GetKubernetesResources @@ -677,7 +683,8 @@ func ApplyValueTraits(val string, traits map[string][]string) ([]string, error) constants.TraitKubeGroups, constants.TraitKubeUsers, constants.TraitDBNames, constants.TraitDBUsers, constants.TraitDBRoles, constants.TraitAWSRoleARNs, constants.TraitAzureIdentities, - constants.TraitGCPServiceAccounts, constants.TraitJWT: + constants.TraitGCPServiceAccounts, constants.TraitJWT, + constants.TraitGitHubOrgs: default: return trace.BadParameter("unsupported variable %q", name) } diff --git a/lib/services/role_test.go b/lib/services/role_test.go index 8feea55b4000c..b5fe8b1778f11 100644 --- a/lib/services/role_test.go +++ b/lib/services/role_test.go @@ -2960,6 +2960,8 @@ func TestApplyTraits(t *testing.T) { inSudoers []string outSudoers []string outKubeResources []types.KubernetesResource + inGitHubPermissions []types.GitHubPermission + outGitHubPermissions []types.GitHubPermission } tests := []struct { comment string @@ -3728,6 +3730,34 @@ func TestApplyTraits(t *testing.T) { }, }, }, + { + comment: "GitHub permissions in allow rule", + inTraits: map[string][]string{ + "github_orgs": {"my-org1", "my-org2"}, + }, + allow: rule{ + inGitHubPermissions: []types.GitHubPermission{{ + Organizations: []string{"{{internal.github_orgs}}"}, + }}, + outGitHubPermissions: []types.GitHubPermission{{ + Organizations: []string{"my-org1", "my-org2"}, + }}, + }, + }, + { + comment: "GitHub permissions in deny rule", + inTraits: map[string][]string{ + "orgs": {"my-org1", "my-org2"}, + }, + deny: rule{ + inGitHubPermissions: []types.GitHubPermission{{ + Organizations: []string{"{{external.orgs}}"}, + }}, + outGitHubPermissions: []types.GitHubPermission{{ + Organizations: []string{"my-org1", "my-org2"}, + }}, + }, + }, } for _, tt := range tests { t.Run(tt.comment, func(t *testing.T) { @@ -3759,6 +3789,7 @@ func TestApplyTraits(t *testing.T) { Impersonate: &tt.allow.inImpersonate, HostSudoers: tt.allow.inSudoers, KubernetesResources: tt.allow.inKubeResources, + GitHubPermissions: tt.allow.inGitHubPermissions, }, Deny: types.RoleConditions{ Logins: tt.deny.inLogins, @@ -3780,6 +3811,7 @@ func TestApplyTraits(t *testing.T) { Impersonate: &tt.deny.inImpersonate, HostSudoers: tt.deny.outSudoers, KubernetesResources: 
tt.deny.inKubeResources, + GitHubPermissions: tt.deny.inGitHubPermissions, }, }, } @@ -3813,6 +3845,7 @@ func TestApplyTraits(t *testing.T) { require.Equal(t, rule.spec.outImpersonate, outRole.GetImpersonateConditions(rule.condition)) require.Equal(t, rule.spec.outSudoers, outRole.GetHostSudoers(rule.condition)) require.Equal(t, rule.spec.outKubeResources, outRole.GetRoleConditions(rule.condition).KubernetesResources) + require.Equal(t, rule.spec.outGitHubPermissions, outRole.GetRoleConditions(rule.condition).GitHubPermissions) } }) } diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index bbc957e4de71f..91491831a6fb1 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -629,7 +629,7 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { // part[0] is empty space from leading slash "/" // part[1] is the prefix "v1" switch pathParts[2] { - case "webapi", "enterprise", "scripts", ".well-known", "workload-identity": + case "webapi", "enterprise", "scripts", ".well-known", "workload-identity", "web": http.StripPrefix(v1Prefix, h).ServeHTTP(w, r) return } @@ -1930,7 +1930,7 @@ func setEntitlementsWithLegacyLogic(webCfg *webclient.WebConfig, clusterFeatures // set default Identity fields to legacy feature value webCfg.Entitlements[string(entitlements.AccessLists)] = webclient.EntitlementInfo{Enabled: true, Limit: clusterFeatures.GetAccessList().GetCreateLimit()} webCfg.Entitlements[string(entitlements.AccessMonitoring)] = webclient.EntitlementInfo{Enabled: clusterFeatures.GetAccessMonitoring().GetEnabled(), Limit: clusterFeatures.GetAccessMonitoring().GetMaxReportRangeLimit()} - webCfg.Entitlements[string(entitlements.AccessRequests)] = webclient.EntitlementInfo{Enabled: clusterFeatures.GetAccessRequests().MonthlyRequestLimit > 0, Limit: clusterFeatures.GetAccessRequests().GetMonthlyRequestLimit()} + webCfg.Entitlements[string(entitlements.AccessRequests)] = webclient.EntitlementInfo{Enabled: clusterFeatures.GetAccessRequests().GetMonthlyRequestLimit() > 0, Limit: clusterFeatures.GetAccessRequests().GetMonthlyRequestLimit()} webCfg.Entitlements[string(entitlements.DeviceTrust)] = webclient.EntitlementInfo{Enabled: clusterFeatures.GetDeviceTrust().GetEnabled(), Limit: clusterFeatures.GetDeviceTrust().GetDevicesUsageLimit()} // override Identity Package features if Identity is enabled: set true and clear limit if clusterFeatures.GetIdentityGovernance() { @@ -1944,7 +1944,7 @@ func setEntitlementsWithLegacyLogic(webCfg *webclient.WebConfig, clusterFeatures } // webCfg.: set equal to legacy feature value - webCfg.AccessRequests = clusterFeatures.GetAccessRequests().MonthlyRequestLimit > 0 + webCfg.AccessRequests = clusterFeatures.GetAccessRequests().GetMonthlyRequestLimit() > 0 webCfg.ExternalAuditStorage = clusterFeatures.GetExternalAuditStorage() webCfg.HideInaccessibleFeatures = clusterFeatures.GetFeatureHiding() webCfg.IsIGSEnabled = clusterFeatures.GetIdentityGovernance() diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index cd446223478e7..58c0d1132f3d2 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -3565,6 +3565,89 @@ func TestEndpointNotFoundHandling(t *testing.T) { } } +func TestKnownWebPathsWithAndWithoutV1Prefix(t *testing.T) { + t.Parallel() + const username = "test-user@example.com" + // Allow user to create tokens. 
+ roleTokenCRD, err := types.NewRole(services.RoleNameForUser(username), types.RoleSpecV6{ + Allow: types.RoleConditions{ + Rules: []types.Rule{ + types.NewRule(types.KindToken, + []string{types.VerbCreate}), + }, + }, + }) + require.NoError(t, err) + + env := newWebPack(t, 1) + proxy := env.proxies[0] + pack := proxy.authPack(t, username, []types.Role{roleTokenCRD}) + + res, err := pack.clt.PostJSON(context.Background(), pack.clt.Endpoint("webapi", "token"), types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleNode}, + }) + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(res.Bytes(), &responseToken) + require.NoError(t, err) + + tt := []struct { + name string + endpoint string + }{ + { + name: "web path with prefix", + endpoint: "v1/web/config.js", + }, + { + name: "web path without prefix", + endpoint: "web/config.js", + }, + { + name: "webapi path with prefix", + endpoint: "v1/webapi/spiffe/bundle.json", + }, + { + name: "webapi path without prefix", + endpoint: "webapi/spiffe/bundle.json", + }, + { + name: ".well-known path with prefix", + endpoint: "v1/.well-known/jwks.json", + }, + { + name: ".well-known path without prefix", + endpoint: ".well-known/jwks.json", + }, + { + name: "workload-identity path with prefix", + endpoint: "v1/workload-identity/jwt-jwks.json", + }, + { + name: "workload-identity path without prefix", + endpoint: "workload-identity/jwt-jwks.json", + }, + { + name: "scripts path with prefix", + endpoint: fmt.Sprintf("v1/scripts/%s/install-node.sh", responseToken.ID), + }, + { + name: "scripts path without prefix", + endpoint: fmt.Sprintf("scripts/%s/install-node.sh", responseToken.ID), + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + _, err := pack.clt.Get(context.Background(), fmt.Sprintf("%s/%s", proxy.web.URL, tc.endpoint), url.Values{}) + + require.NoError(t, err) + }) + } +} + func TestInstallDatabaseScriptGeneration(t *testing.T) { const username = "test-user@example.com" diff --git a/package.json b/package.json index 6e517dc59b041..7bc300198a9e9 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,7 @@ "react-select-event": "^5.5.1", "storybook": "^8.4.7", "typescript": "^5.7.3", - "vite": "^6.0.6" + "vite": "^6.0.11" }, "dependencies": { "@codemirror/autocomplete": "^6.18.4", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ef9085f16a6ab..83e985448add9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -118,7 +118,7 @@ importers: version: 8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3) '@storybook/react-vite': specifier: ^8.4.7 - version: 8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(rollup@4.29.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + version: 8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(rollup@4.29.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) '@storybook/test-runner': specifier: ^0.21.0 version: 0.21.0(@types/node@20.17.11)(babel-plugin-macros@3.1.0)(storybook@8.4.7(prettier@3.4.2)) @@ -192,8 +192,8 @@ importers: specifier: ^5.7.3 version: 5.7.3 vite: - specifier: ^6.0.6 - version: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + specifier: ^6.0.11 + version: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) e/web/teleport: {} @@ -228,7 +228,7 @@ importers: version: 21.1.7 '@vitejs/plugin-react-swc': specifier: ^3.7.2 - version: 
3.7.2(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + version: 3.7.2(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) babel-plugin-styled-components: specifier: ^2.1.4 version: 2.1.4(@babel/core@7.26.0)(styled-components@6.1.13(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) @@ -270,10 +270,10 @@ importers: version: 8.19.0(eslint@9.17.0)(typescript@5.7.3) vite-plugin-wasm: specifier: ^3.4.1 - version: 3.4.1(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + version: 3.4.1(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) vite-tsconfig-paths: specifier: ^5.1.4 - version: 5.1.4(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + version: 5.1.4(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) web/packages/design: dependencies: @@ -480,7 +480,7 @@ importers: version: 25.1.8(electron-builder-squirrel-windows@25.1.8(dmg-builder@25.1.8)) electron-vite: specifier: ^2.3.0 - version: 2.3.0(@swc/core@1.10.4)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + version: 2.3.0(@swc/core@1.10.4)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) events: specifier: 3.3.0 version: 3.3.0 @@ -6695,8 +6695,8 @@ packages: vite: optional: true - vite@6.0.6: - resolution: {integrity: sha512-NSjmUuckPmDU18bHz7QZ+bTYhRR0iA72cs2QAxCqDpafJ0S6qetco0LB3WW2OxlMHS0JmAv+yZ/R3uPmMyGTjQ==} + vite@6.0.11: + resolution: {integrity: sha512-4VL9mQPKoHy4+FE0NnRE/kbY51TOfaknxAjt3fJbGJxhIpBZiqVzlZDEesWWsuREXHwNdAoOFZ9MkPEVXczHwg==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} hasBin: true peerDependencies: @@ -8709,11 +8709,11 @@ snapshots: '@types/yargs': 17.0.33 chalk: 4.1.2 - '@joshwooding/vite-plugin-react-docgen-typescript@0.4.2(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': + '@joshwooding/vite-plugin-react-docgen-typescript@0.4.2(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': dependencies: magic-string: 0.27.0 react-docgen-typescript: 2.2.2(typescript@5.7.3) - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) optionalDependencies: typescript: 5.7.3 @@ -9240,13 +9240,13 @@ snapshots: dependencies: storybook: 8.4.7(prettier@3.4.2) - '@storybook/builder-vite@8.4.7(storybook@8.4.7(prettier@3.4.2))(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': + '@storybook/builder-vite@8.4.7(storybook@8.4.7(prettier@3.4.2))(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': dependencies: '@storybook/csf-plugin': 8.4.7(storybook@8.4.7(prettier@3.4.2)) browser-assert: 1.2.1 storybook: 8.4.7(prettier@3.4.2) ts-dedent: 2.2.0 - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) '@storybook/components@8.4.7(storybook@8.4.7(prettier@3.4.2))': dependencies: @@ -9297,11 +9297,11 @@ snapshots: react-dom: 18.3.1(react@18.3.1) storybook: 8.4.7(prettier@3.4.2) - '@storybook/react-vite@8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(rollup@4.29.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': + '@storybook/react-vite@8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(rollup@4.29.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': dependencies: - '@joshwooding/vite-plugin-react-docgen-typescript': 
0.4.2(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + '@joshwooding/vite-plugin-react-docgen-typescript': 0.4.2(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) '@rollup/pluginutils': 5.1.3(rollup@4.29.1) - '@storybook/builder-vite': 8.4.7(storybook@8.4.7(prettier@3.4.2))(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) + '@storybook/builder-vite': 8.4.7(storybook@8.4.7(prettier@3.4.2))(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)) '@storybook/react': 8.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@8.4.7(prettier@3.4.2))(typescript@5.7.3) find-up: 5.0.0 magic-string: 0.30.14 @@ -9311,7 +9311,7 @@ snapshots: resolve: 1.22.8 storybook: 8.4.7(prettier@3.4.2) tsconfig-paths: 4.2.0 - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) transitivePeerDependencies: - '@storybook/test' - rollup @@ -9850,10 +9850,10 @@ snapshots: - '@codemirror/lint' - '@codemirror/search' - '@vitejs/plugin-react-swc@3.7.2(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': + '@vitejs/plugin-react-swc@3.7.2(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1))': dependencies: '@swc/core': 1.10.4 - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) transitivePeerDependencies: - '@swc/helpers' @@ -11073,7 +11073,7 @@ snapshots: electron-to-chromium@1.5.67: {} - electron-vite@2.3.0(@swc/core@1.10.4)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): + electron-vite@2.3.0(@swc/core@1.10.4)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): dependencies: '@babel/core': 7.26.0 '@babel/plugin-transform-arrow-functions': 7.25.9(@babel/core@7.26.0) @@ -11081,7 +11081,7 @@ snapshots: esbuild: 0.21.5 magic-string: 0.30.14 picocolors: 1.1.1 - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) optionalDependencies: '@swc/core': 1.10.4 transitivePeerDependencies: @@ -14409,22 +14409,22 @@ snapshots: extsprintf: 1.4.1 optional: true - vite-plugin-wasm@3.4.1(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): + vite-plugin-wasm@3.4.1(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): dependencies: - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) - vite-tsconfig-paths@5.1.4(typescript@5.7.3)(vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): + vite-tsconfig-paths@5.1.4(typescript@5.7.3)(vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1)): dependencies: debug: 4.3.7 globrex: 0.1.2 tsconfck: 3.1.0(typescript@5.7.3) optionalDependencies: - vite: 6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) + vite: 6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1) transitivePeerDependencies: - supports-color - typescript - vite@6.0.6(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1): + vite@6.0.11(@types/node@20.17.11)(terser@5.31.1)(yaml@2.6.1): dependencies: esbuild: 0.24.2 postcss: 8.4.49 diff --git a/web/packages/build/vite/config.ts b/web/packages/build/vite/config.ts index a429b6365aebd..9521631ddf189 100644 --- a/web/packages/build/vite/config.ts +++ b/web/packages/build/vite/config.ts @@ -54,9 +54,15 @@ export function createViteConfig( } } + const targetHostname = + target !== DEFAULT_PROXY_TARGET + ? 
new URL(`http://${target}`).hostname + : undefined; + const config: UserConfig = { clearScreen: false, server: { + allowedHosts: targetHostname ? [`.${targetHostname}`] : [], fs: { allow: [rootDirectory, '.'], }, diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts index 791a9ce8d9f05..320aa7f864dd9 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts @@ -47,6 +47,7 @@ describe('validateRoleEditorModel', () => { expect(result.metadata.valid).toBe(true); expect(result.resources).toEqual([]); expect(result.rules).toEqual([]); + expect(result.isValid).toBe(true); }); test('valid complex model', () => { @@ -103,6 +104,7 @@ describe('validateRoleEditorModel', () => { expect(result.metadata.valid).toBe(true); expect(validity(result.resources)).toEqual([true, true, true, true, true]); expect(validity(result.rules)).toEqual([true]); + expect(result.isValid).toBe(true); }); test('invalid metadata', () => { @@ -110,6 +112,7 @@ describe('validateRoleEditorModel', () => { model.metadata.name = ''; const result = validateRoleEditorModel(model, undefined, undefined); expect(result.metadata.valid).toBe(false); + expect(result.isValid).toBe(false); }); test('invalid resource', () => { @@ -123,6 +126,7 @@ describe('validateRoleEditorModel', () => { ]; const result = validateRoleEditorModel(model, undefined, undefined); expect(validity(result.resources)).toEqual([false]); + expect(result.isValid).toBe(false); }); test('invalid access rule', () => { @@ -136,6 +140,7 @@ describe('validateRoleEditorModel', () => { ]; const result = validateRoleEditorModel(model, undefined, undefined); expect(validity(result.rules)).toEqual([false]); + expect(result.isValid).toBe(false); }); it('reuses previously computed section results', () => { @@ -145,6 +150,7 @@ describe('validateRoleEditorModel', () => { expect(result2.metadata).toBe(result1.metadata); expect(result2.resources).toBe(result1.resources); expect(result2.rules).toBe(result1.rules); + expect(result2.isValid).toBe(result1.isValid); }); }); diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts index 0c1cd905eff43..051ca4988c12b 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts @@ -49,6 +49,10 @@ export type RoleEditorModelValidationResult = { metadata: MetadataValidationResult; resources: ResourceAccessValidationResult[]; rules: AccessRuleValidationResult[]; + /** + * isValid is true if all the fields in the validation result are valid. 
+ */ + isValid: boolean; }; /** @@ -72,22 +76,32 @@ export function validateRoleEditorModel( previousModel: RoleEditorModel | undefined, previousResult: RoleEditorModelValidationResult | undefined ): RoleEditorModelValidationResult { + const metadataResult = validateMetadata( + model.metadata, + previousModel?.metadata, + previousResult?.metadata + ); + + const resourcesResult = validateResourceAccessList( + model.resources, + previousModel?.resources, + previousResult?.resources + ); + + const rulesResult = validateAccessRuleList( + model.rules, + previousModel?.rules, + previousResult?.rules + ); + return { - metadata: validateMetadata( - model.metadata, - previousModel?.metadata, - previousResult?.metadata - ), - resources: validateResourceAccessList( - model.resources, - previousModel?.resources, - previousResult?.resources - ), - rules: validateAccessRuleList( - model.rules, - previousModel?.rules, - previousResult?.rules - ), + isValid: + metadataResult.valid && + resourcesResult.every(r => r.valid) && + rulesResult.every(r => r.valid), + metadata: metadataResult, + resources: resourcesResult, + rules: rulesResult, }; } diff --git a/web/packages/teleport/src/Users/UserAddEdit/TraitsEditor.tsx b/web/packages/teleport/src/Users/UserAddEdit/TraitsEditor.tsx index 4179aeac18855..5237f6efa9872 100644 --- a/web/packages/teleport/src/Users/UserAddEdit/TraitsEditor.tsx +++ b/web/packages/teleport/src/Users/UserAddEdit/TraitsEditor.tsx @@ -42,6 +42,7 @@ const traitsPreset = [ 'kubernetes_users', 'logins', 'windows_logins', + 'github_orgs', ]; /** diff --git a/web/packages/teleterm/src/services/tshd/testHelpers.ts b/web/packages/teleterm/src/services/tshd/testHelpers.ts index 8cb15ec3e3701..34ccb2a3e7ef2 100644 --- a/web/packages/teleterm/src/services/tshd/testHelpers.ts +++ b/web/packages/teleterm/src/services/tshd/testHelpers.ts @@ -245,7 +245,7 @@ export const makeLoggedInUser = ( export const makeDatabaseGateway = ( props: Partial = {} ): tsh.Gateway => ({ - uri: '/gateways/foo', + uri: '/gateways/db', targetName: 'sales-production', targetUri: databaseUri, targetUser: 'alice', @@ -265,7 +265,7 @@ export const makeDatabaseGateway = ( export const makeKubeGateway = ( props: Partial = {} ): tsh.Gateway => ({ - uri: '/gateways/foo', + uri: '/gateways/kube', targetName: 'foo', targetUri: kubeUri, targetUser: '', @@ -285,7 +285,7 @@ export const makeKubeGateway = ( export const makeAppGateway = ( props: Partial = {} ): tsh.Gateway => ({ - uri: '/gateways/bar', + uri: '/gateways/app', targetName: 'sales-production', targetUri: appUri, localAddress: 'localhost', diff --git a/web/packages/teleterm/src/ui/AppInitializer/AppInitializer.test.tsx b/web/packages/teleterm/src/ui/AppInitializer/AppInitializer.test.tsx index 102a9d4d23e32..63d35c68a0b3a 100644 --- a/web/packages/teleterm/src/ui/AppInitializer/AppInitializer.test.tsx +++ b/web/packages/teleterm/src/ui/AppInitializer/AppInitializer.test.tsx @@ -147,10 +147,8 @@ test('activating a workspace via deep link overrides the previously active works await userEvent.click(dialogSuccessButton); // Check if the first activated workspace is the one from the deep link. 
- expect(await screen.findByTitle(/Current cluster:/)).toBeVisible(); - expect( - screen.queryByTitle(`Current cluster: ${deepLinkCluster.name}`) - ).toBeVisible(); + const el = await screen.findByTitle(/Open Profiles/); + expect(el.title).toContain(deepLinkCluster.name); }); test.each<{ diff --git a/web/packages/teleterm/src/ui/DocumentGateway/DocumentGateway.tsx b/web/packages/teleterm/src/ui/DocumentGateway/DocumentGateway.tsx index cb884bff8a924..ede152605fc3d 100644 --- a/web/packages/teleterm/src/ui/DocumentGateway/DocumentGateway.tsx +++ b/web/packages/teleterm/src/ui/DocumentGateway/DocumentGateway.tsx @@ -54,7 +54,7 @@ export function DocumentGateway(props: { const runCliCommand = () => { const command = getCliCommandArgv0(gateway.gatewayCliCommand); - const title = `${command} · ${doc.targetUser}@${doc.targetName}`; + const title = `${command} · ${doc.targetName} (${doc.targetUser})`; const cliDoc = documentsService.createGatewayCliDocument({ title, diff --git a/web/packages/teleterm/src/ui/StatusBar/ShareFeedback/ShareFeedback.tsx b/web/packages/teleterm/src/ui/StatusBar/ShareFeedback/ShareFeedback.tsx index 436a4d280c299..ed0d32f2957fd 100644 --- a/web/packages/teleterm/src/ui/StatusBar/ShareFeedback/ShareFeedback.tsx +++ b/web/packages/teleterm/src/ui/StatusBar/ShareFeedback/ShareFeedback.tsx @@ -50,7 +50,7 @@ export function ShareFeedback() { onClick={openShareFeedback} > {!hasBeenShareFeedbackOpened && } - + . */ +import { Fragment } from 'react'; + import { Flex, Text } from 'design'; import { AccessRequestCheckoutButton } from './AccessRequestCheckoutButton'; @@ -23,7 +25,7 @@ import { ShareFeedback } from './ShareFeedback'; import { useActiveDocumentClusterBreadcrumbs } from './useActiveDocumentClusterBreadcrumbs'; export function StatusBar() { - const clusterBreadcrumbs = useActiveDocumentClusterBreadcrumbs(); + const breadcrumbs = useActiveDocumentClusterBreadcrumbs(); return ( - - {clusterBreadcrumbs} - + {breadcrumbs && ( + name).join(' → ')} + > + {breadcrumbs.map((breadcrumb, index) => ( + + {breadcrumb.Icon && ( + + )} + {breadcrumb.name} + {index !== breadcrumbs.length - 1 && ( + + )} + + ))} + + )} + + diff --git a/web/packages/teleterm/src/ui/StatusBar/useActiveDocumentClusterBreadcrumbs.ts b/web/packages/teleterm/src/ui/StatusBar/useActiveDocumentClusterBreadcrumbs.ts index bd785cf51d72a..9d82b6dce151e 100644 --- a/web/packages/teleterm/src/ui/StatusBar/useActiveDocumentClusterBreadcrumbs.ts +++ b/web/packages/teleterm/src/ui/StatusBar/useActiveDocumentClusterBreadcrumbs.ts @@ -16,42 +16,57 @@ * along with this program. If not, see . 
*/ -import { useAppContext } from 'teleterm/ui/appContextProvider'; +import { ComponentType, useCallback } from 'react'; + +import { IconProps } from 'design/Icon/Icon'; + import { getResourceUri, - useWorkspaceServiceState, + getStaticNameAndIcon, } from 'teleterm/ui/services/workspacesService'; import { routing } from 'teleterm/ui/uri'; -export function useActiveDocumentClusterBreadcrumbs(): string { - const ctx = useAppContext(); - useWorkspaceServiceState(); - ctx.clustersService.useState(); +import { useStoreSelector } from '../hooks/useStoreSelector'; - const activeDocument = ctx.workspacesService - .getActiveWorkspaceDocumentService() - ?.getActive(); +interface Breadcrumb { + name: string; + Icon?: ComponentType; +} - if (!activeDocument) { - return; - } +export function useActiveDocumentClusterBreadcrumbs(): Breadcrumb[] { + const activeDocument = useStoreSelector( + 'workspacesService', + useCallback(state => { + const workspace = state.workspaces[state.rootClusterUri]; + return workspace?.documents.find(d => d.uri === workspace?.location); + }, []) + ); + const resourceUri = activeDocument && getResourceUri(activeDocument); + const staticNameAndIcon = + activeDocument && getStaticNameAndIcon(activeDocument); + const clusterUri = resourceUri && routing.ensureClusterUri(resourceUri); + const rootClusterUri = + resourceUri && routing.ensureRootClusterUri(resourceUri); + + const cluster = useStoreSelector( + 'clustersService', + useCallback(state => state.clusters.get(clusterUri), [clusterUri]) + ); + const rootCluster = useStoreSelector( + 'clustersService', + useCallback(state => state.clusters.get(rootClusterUri), [rootClusterUri]) + ); - const resourceUri = getResourceUri(activeDocument); - if (!resourceUri) { + if (!cluster || !rootCluster || !staticNameAndIcon) { return; } - const clusterUri = routing.ensureClusterUri(resourceUri); - const rootClusterUri = routing.ensureRootClusterUri(resourceUri); - - const rootCluster = ctx.clustersService.findCluster(rootClusterUri); - const leafCluster = - clusterUri === rootClusterUri - ? undefined - : ctx.clustersService.findCluster(clusterUri); - - return [rootCluster, leafCluster] - .filter(Boolean) - .map(c => c.name) - .join(' > '); + return [ + { name: rootCluster.name }, + clusterUri !== rootClusterUri && { name: cluster.name }, + { + name: staticNameAndIcon.name, + Icon: staticNameAndIcon.Icon, + }, + ].filter(Boolean); } diff --git a/web/packages/teleterm/src/ui/TabHost/TabHost.story.tsx b/web/packages/teleterm/src/ui/TabHost/TabHost.story.tsx new file mode 100644 index 0000000000000..07b12cd25e69a --- /dev/null +++ b/web/packages/teleterm/src/ui/TabHost/TabHost.story.tsx @@ -0,0 +1,73 @@ +/** + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +import { Meta } from '@storybook/react'; +import { createRef } from 'react'; + +import { makeRootCluster } from 'teleterm/services/tshd/testHelpers'; +import { ResourcesContextProvider } from 'teleterm/ui/DocumentCluster/resourcesContext'; +import { MockAppContextProvider } from 'teleterm/ui/fixtures/MockAppContextProvider'; +import { MockAppContext } from 'teleterm/ui/fixtures/mocks'; +import { Document } from 'teleterm/ui/services/workspacesService'; +import { + makeDocumentAccessRequests, + makeDocumentAuthorizeWebSession, + makeDocumentCluster, + makeDocumentConnectMyComputer, + makeDocumentGatewayApp, + makeDocumentGatewayCliClient, + makeDocumentGatewayDatabase, + makeDocumentGatewayKube, + makeDocumentPtySession, + makeDocumentTshNode, +} from 'teleterm/ui/services/workspacesService/documentsService/testHelpers'; + +import { TabHostContainer } from './TabHost'; + +const meta: Meta = { + title: 'Teleterm/TabHost', +}; + +export default meta; + +const allDocuments: Document[] = [ + makeDocumentCluster(), + makeDocumentTshNode(), + makeDocumentConnectMyComputer(), + makeDocumentGatewayDatabase(), + makeDocumentGatewayApp(), + makeDocumentGatewayCliClient(), + makeDocumentGatewayKube(), + makeDocumentAccessRequests(), + makeDocumentPtySession(), + makeDocumentAuthorizeWebSession(), +]; + +const cluster = makeRootCluster(); + +export function Story() { + const ctx = new MockAppContext(); + ctx.addRootClusterWithDoc(cluster, allDocuments); + return ( + + + + + + ); +} diff --git a/web/packages/teleterm/src/ui/Tabs/TabItem.tsx b/web/packages/teleterm/src/ui/Tabs/TabItem.tsx index c69f5535c1658..43aad682a49c1 100644 --- a/web/packages/teleterm/src/ui/Tabs/TabItem.tsx +++ b/web/packages/teleterm/src/ui/Tabs/TabItem.tsx @@ -16,11 +16,12 @@ * along with this program. If not, see . */ -import { useRef } from 'react'; +import { ComponentType, useRef } from 'react'; import styled from 'styled-components'; import { ButtonIcon, Text } from 'design'; import * as Icons from 'design/Icon'; +import { IconProps } from 'design/Icon/Icon'; import { LinearProgress } from 'teleterm/ui/components/LinearProgress'; @@ -29,6 +30,7 @@ import { useTabDnD } from './useTabDnD'; type TabItemProps = { index?: number; name?: string; + Icon?: ComponentType; active?: boolean; nextActive?: boolean; closeTabTooltip?: string; @@ -75,24 +77,21 @@ export function TabItem(props: TabItemProps) { min-width: 0; `} > - - + <TabContent ref={ref} active={active} dragging={isDragging} title={name}> + {props.Icon && <props.Icon size="small" pr={1} />} + <Title color="inherit" fontWeight={500} fontSize="12px"> {name} {isLoading && active && } {onClose && ( (props.active ? 'flex' : 'none')}; `} onClick={handleClose} > @@ -115,13 +114,7 @@ export function NewTabItem(props: NewTabItemProps) { return ( - + @@ -141,8 +134,6 @@ const RelativeContainer = styled.div` const TabContent = styled.div<{ dragging?: boolean; active?: boolean; - // TODO(bl-nero): is this really used? Perhaps remove it. - canDrag?: boolean; }>` display: flex; z-index: 1; // covers shadow from the top @@ -150,6 +141,8 @@ const TabContent = styled.div<{ min-width: 0; width: 100%; height: 100%; + cursor: pointer; + padding-inline: 6px 4px; border-radius: 8px 8px 0 0; position: relative; opacity: ${props => (props.dragging ? 
0 : 1)}; @@ -168,21 +161,15 @@ const TabContent = styled.div<{ &:focus { color: ${props => props.theme.colors.text.main}; transition: color 0.3s; + + > .close { + display: flex; + } } `; const Title = styled(Text)` - display: block; - cursor: pointer; - outline: none; - color: inherit; - font-family: inherit; - line-height: 32px; - background-color: transparent; white-space: nowrap; - padding-left: 12px; - border: none; - min-width: 0; width: 100%; `; diff --git a/web/packages/teleterm/src/ui/Tabs/Tabs.tsx b/web/packages/teleterm/src/ui/Tabs/Tabs.tsx index 509324cb7d192..a5ca3eb832ec2 100644 --- a/web/packages/teleterm/src/ui/Tabs/Tabs.tsx +++ b/web/packages/teleterm/src/ui/Tabs/Tabs.tsx @@ -22,7 +22,10 @@ import { Box } from 'design'; import { typography } from 'design/system'; import { TypographyProps } from 'design/system/typography'; -import { Document } from 'teleterm/ui/services/workspacesService'; +import { + Document, + getStaticNameAndIcon, +} from 'teleterm/ui/services/workspacesService'; import { NewTabItem, TabItem } from './TabItem'; @@ -50,6 +53,7 @@ export function Tabs(props: Props) { index={index} name={item.title} active={active} + Icon={getStaticNameAndIcon(item)?.Icon} nextActive={nextActive} onClick={() => onSelect(item)} onClose={() => onClose(item)} diff --git a/web/packages/teleterm/src/ui/fixtures/mocks.ts b/web/packages/teleterm/src/ui/fixtures/mocks.ts index 08a15915d9b1a..f746eead52eb5 100644 --- a/web/packages/teleterm/src/ui/fixtures/mocks.ts +++ b/web/packages/teleterm/src/ui/fixtures/mocks.ts @@ -45,17 +45,24 @@ export class MockAppContext extends AppContext { }); } - addRootClusterWithDoc(cluster: Cluster, doc: Document | undefined) { + addRootClusterWithDoc( + cluster: Cluster, + doc: Document[] | Document | undefined + ) { this.clustersService.setState(draftState => { draftState.clusters.set(cluster.uri, cluster); }); + const docs = Array.isArray(doc) ? doc : [doc]; this.workspacesService.setState(draftState => { draftState.rootClusterUri = cluster.uri; draftState.workspaces[cluster.uri] = { - documents: [doc].filter(Boolean), - location: doc?.uri, + documents: docs.filter(Boolean), + location: docs[0]?.uri, localClusterUri: cluster.uri, - accessRequests: undefined, + accessRequests: { + isBarCollapsed: true, + pending: { kind: 'role', roles: new Set() }, + }, }; }); } diff --git a/web/packages/teleterm/src/ui/services/connectionTracker/connectionTrackerService.ts b/web/packages/teleterm/src/ui/services/connectionTracker/connectionTrackerService.ts index e444834106036..af1d53feca6fd 100644 --- a/web/packages/teleterm/src/ui/services/connectionTracker/connectionTrackerService.ts +++ b/web/packages/teleterm/src/ui/services/connectionTracker/connectionTrackerService.ts @@ -265,6 +265,11 @@ export class ConnectionTrackerService extends ImmutableStore. */ +import { ComponentType } from 'react'; + +import { + Application, + Database, + Kubernetes, + Laptop, + ListAddCheck, + Server, + ShieldCheck, + Table, + Terminal, +} from 'design/Icon'; +import { IconProps } from 'design/Icon/Icon'; + import { ClusterOrResourceUri, isAppUri, @@ -100,7 +115,7 @@ export function getDocumentGatewayTitle(doc: DocumentGateway): string { switch (targetKind) { case 'db': { - return targetUser ? `${targetUser}@${targetName}` : targetName; + return targetUser ? 
`${targetName} (${targetUser})` : targetName; } case 'app': { return targetSubresourceName @@ -112,3 +127,79 @@ export function getDocumentGatewayTitle(doc: DocumentGateway): string { } } } + +/** + * Returns a name and icon of the document. + * If possible, the name is the title of a document, except for cases + * when it contains some additional values like cwd, or a shell name. + * At the moment, the name is used only in the status bar. + * The icon is used both in the status bar and the tabs. + */ +export function getStaticNameAndIcon( + document: Document +): { name: string; Icon: ComponentType } | undefined { + switch (document.kind) { + case 'doc.cluster': + return { + name: 'Resources', + Icon: Table, + }; + case 'doc.gateway_cli_client': + return { + name: document.title, + Icon: Database, + }; + case 'doc.gateway': + if (isDatabaseUri(document.targetUri)) { + return { + name: document.title, + Icon: Database, + }; + } + if (isAppUri(document.targetUri)) { + return { + name: document.title, + Icon: Application, + }; + } + return; + case 'doc.gateway_kube': + return { + name: routing.parseKubeUri(document.targetUri).params.kubeId, + Icon: Kubernetes, + }; + case 'doc.terminal_tsh_node': + return isDocumentTshNodeWithServerId(document) + ? { + name: document.title, + Icon: Server, + } + : undefined; + case 'doc.access_requests': + return { + name: document.title, + Icon: ListAddCheck, + }; + case 'doc.terminal_shell': + return { + name: 'Terminal', + Icon: Terminal, + }; + case 'doc.connect_my_computer': + return { + name: document.title, + Icon: Laptop, + }; + case 'doc.authorize_web_session': + return { + name: document.title, + Icon: ShieldCheck, + }; + case 'doc.blank': + case 'doc.terminal_tsh_kube': + return undefined; + default: + document satisfies never; + return undefined; + } +} diff --git a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/testHelpers.ts b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/testHelpers.ts index c27a5acad517a..87e4a76e8c5ac 100644 --- a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/testHelpers.ts +++ b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/testHelpers.ts @@ -16,16 +16,32 @@ * along with this program. If not, see . 
*/ -import { makeRootCluster } from 'teleterm/services/tshd/testHelpers'; +import { + makeAppGateway, + makeDatabaseGateway, + makeKubeGateway, + makeRootCluster, + makeServer, +} from 'teleterm/services/tshd/testHelpers'; -import { DocumentCluster } from './types'; +import { + DocumentAccessRequests, + DocumentAuthorizeWebSession, + DocumentCluster, + DocumentConnectMyComputer, + DocumentGateway, + DocumentGatewayCliClient, + DocumentGatewayKube, + DocumentPtySession, + DocumentTshNodeWithServerId, +} from './types'; export function makeDocumentCluster( props?: Partial ): DocumentCluster { return { kind: 'doc.cluster', - uri: '/docs/unique-uri', + uri: '/docs/cluster', title: 'teleport-ent.asteroid.earth', clusterUri: makeRootCluster().uri, queryParams: { @@ -40,3 +56,153 @@ export function makeDocumentCluster( ...props, }; } + +export function makeDocumentGatewayDatabase( + props?: Partial +): DocumentGateway { + const gw = makeDatabaseGateway(); + return { + kind: 'doc.gateway', + uri: '/docs/gateway_database', + gatewayUri: '/gateways/db-gateway', + title: 'aurora (sre)', + targetUri: gw.targetUri, + port: gw.localPort, + targetName: gw.targetName, + targetUser: gw.targetUser, + status: '', + targetSubresourceName: gw.targetSubresourceName, + origin: 'connection_list', + ...props, + }; +} + +export function makeDocumentGatewayApp( + props?: Partial +): DocumentGateway { + const gw = makeAppGateway(); + return { + kind: 'doc.gateway', + uri: '/docs/gateway_app', + title: 'grafana', + targetUri: gw.targetUri, + gatewayUri: gw.uri, + port: gw.localPort, + targetName: gw.targetName, + targetUser: gw.targetUser, + status: '', + targetSubresourceName: gw.targetSubresourceName, + origin: 'connection_list', + ...props, + }; +} + +export function makeDocumentPtySession( + props?: Partial +): DocumentPtySession { + return { + kind: 'doc.terminal_shell', + uri: '/docs/terminal_shell', + title: '/Users/alice/Documents', + rootClusterId: 'teleport-local', + ...props, + }; +} + +export function makeDocumentTshNode( + props?: Partial +): DocumentTshNodeWithServerId { + return { + kind: 'doc.terminal_tsh_node', + uri: '/docs/terminal_tsh_node', + title: 'alice@node', + serverUri: makeServer().uri, + status: '', + rootClusterId: 'teleport-local', + leafClusterId: '', + origin: 'connection_list', + serverId: '1234abcd-1234-abcd-1234-abcd1234abcd', + ...props, + }; +} + +export function makeDocumentGatewayCliClient( + props?: Partial +): DocumentGatewayCliClient { + const gw = makeDatabaseGateway(); + return { + kind: 'doc.gateway_cli_client', + uri: '/docs/gateway_cli_client', + title: 'psql · aurora (sre)', + rootClusterId: 'teleport-local', + leafClusterId: '', + targetProtocol: gw.protocol, + targetUri: gw.targetUri, + targetName: gw.targetName, + targetUser: gw.targetUser, + status: '', + ...props, + }; +} + +export function makeDocumentGatewayKube( + props?: Partial +): DocumentGatewayKube { + const gw = makeKubeGateway(); + return { + kind: 'doc.gateway_kube', + uri: '/docs/gateway_kube', + title: 'cookie', + rootClusterId: 'teleport-local', + leafClusterId: '', + targetUri: gw.targetUri, + status: '', + origin: 'connection_list', + ...props, + }; +} + +export function makeDocumentAccessRequests( + props?: Partial +): DocumentAccessRequests { + return { + kind: 'doc.access_requests', + uri: '/docs/access_requests', + title: 'Access Requests', + clusterUri: makeRootCluster().uri, + state: 'browsing', + requestId: '1231', + ...props, + }; +} + +export function makeDocumentConnectMyComputer( + 
props?: Partial<DocumentConnectMyComputer>
+): DocumentConnectMyComputer {
+  return {
+    kind: 'doc.connect_my_computer',
+    uri: '/docs/connect-my-computer',
+    rootClusterUri: makeRootCluster().uri,
+    status: '',
+    title: 'Connect My Computer',
+    ...props,
+  };
+}
+
+export function makeDocumentAuthorizeWebSession(
+  props?: Partial<DocumentAuthorizeWebSession>
+): DocumentAuthorizeWebSession {
+  return {
+    kind: 'doc.authorize_web_session',
+    uri: '/docs/authorize-web-session',
+    title: 'Authorize Web Session',
+    rootClusterUri: makeRootCluster().uri,
+    webSessionRequest: {
+      id: '123',
+      username: 'alice',
+      token: 'secret-token',
+      redirectUri: '',
+    },
+    ...props,
+  };
+}
diff --git a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts
index d39f12167515d..46162821b5721 100644
--- a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts
+++ b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts
@@ -288,6 +288,10 @@ export type Document =
   | DocumentConnectMyComputer
   | DocumentAuthorizeWebSession;
 
+/**
+ * @deprecated DocumentTshNode is supposed to be simplified to just DocumentTshNodeWithServerId.
+ * See the comment for DocumentTshNodeWithLoginHost for more details.
+ */
 export function isDocumentTshNodeWithLoginHost(
   doc: Document
 ): doc is DocumentTshNodeWithLoginHost {
@@ -296,6 +300,10 @@ export function isDocumentTshNodeWithLoginHost(
   return doc.kind === 'doc.terminal_tsh_node' && !('serverId' in doc);
 }
 
+/**
+ * @deprecated DocumentTshNode is supposed to be simplified to just DocumentTshNodeWithServerId.
+ * See the comment for DocumentTshNodeWithLoginHost for more details.
+ */
 export function isDocumentTshNodeWithServerId(
   doc: Document
 ): doc is DocumentTshNodeWithServerId {
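As a rough illustration only (not part of this diff), the new `getStaticNameAndIcon` helper and the document test helpers added above can be combined as follows; the exact label depends on the kube URI baked into `makeKubeGateway`:

```ts
// Illustrative sketch; import paths mirror the files touched in this diff.
import { getStaticNameAndIcon } from 'teleterm/ui/services/workspacesService';
import { makeDocumentGatewayKube } from 'teleterm/ui/services/workspacesService/documentsService/testHelpers';

const doc = makeDocumentGatewayKube();
const nameAndIcon = getStaticNameAndIcon(doc);
// For a 'doc.gateway_kube' document this yields the kube cluster id parsed from
// doc.targetUri plus the Kubernetes icon; document kinds with no static
// representation (e.g. 'doc.blank') yield undefined.
```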